Dataset schema (field, type, observed range):

    entry_point           string   length 1 to 65
    original_triton_code  string   length 4.5k to 619k
    python_code           string   length 208 to 60.9k
    triton_code           string   length 1.15k to 275k
    repo_name             string   length 7 to 115
    module_name           string   length 1 to 65
    synthetic             bool     1 class
    uuid                  int64    0 to 18.5k
    licenses              list     length 1 to 6
    stars                 int64    0 to 19.8k
    sha                   string   length 40
    repo_link             string   length 72 to 180
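Each record pairs a PyTorch module (python_code) with its Inductor-generated Triton lowering (original_triton_code) and a cleaned-up standalone version (triton_code). Below is a minimal sketch of iterating over rows with the `datasets` library; the dataset identifier is a placeholder, not the real hub path.

    from datasets import load_dataset

    # Hypothetical dataset id -- substitute the actual hub path.
    ds = load_dataset("example/inductor-triton-pairs", split="train")
    row = ds[0]
    print(row["entry_point"], row["repo_name"], row["stars"])
    print(row["triton_code"][:200])  # kernel source is stored as a plain string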
entry_point: LayerThreshold
original_triton_code:

# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/tw/ctwdeyx3sfgndchtqw6lf5fe4dpjbwow5nqo6huwki3vvakt55df.py
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.threshold]
# Source node to ATen node mapping:
#   result => full_default, le, where
# Graph fragment:
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%arg0_1, 0.05842519234815202), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.7612752914428711), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %full_default, %arg0_1), kwargs = {})
triton_poi_fused_threshold_0 = async_compile.triton('triton_poi_fused_threshold_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_threshold_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_threshold_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.05842519234815202
    tmp2 = tmp0 <= tmp1
    tmp3 = 0.7612752914428711
    tmp4 = tl.where(tmp2, tmp3, tmp0)
    tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [result], Original ATen: [aten.threshold]
        stream0 = get_raw_stream(0)
        triton_poi_fused_threshold_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import random
import torch
import torch.nn as nn


class LayerThreshold(nn.Module):
    """
    Test for nn.layers based types
    """

    def __init__(self):
        super(LayerThreshold, self).__init__()
        self.threshold = random.random()
        self.value = self.threshold + random.random()
        self.thresh = nn.Threshold(self.threshold, self.value)

    def forward(self, x):
        x = self.thresh(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import random
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_threshold_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.05842519234815202
    tmp2 = tmp0 <= tmp1
    tmp3 = 0.7612752914428711
    tmp4 = tl.where(tmp2, tmp3, tmp0)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_threshold_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class LayerThresholdNew(nn.Module):
    """
    Test for nn.layers based types
    """

    def __init__(self):
        super(LayerThresholdNew, self).__init__()
        self.threshold = random.random()
        self.value = self.threshold + random.random()
        self.thresh = nn.Threshold(self.threshold, self.value)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
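A hedged sanity check for this row (an editorial addition, not part of the dataset; assumes a CUDA device): the constants baked into the kernel are the threshold 0.05842519234815202 and replacement value 0.7612752914428711, so the compiled `call` should match an explicit torch.where.

    import torch

    x = torch.rand(4, 4, 4, 4, device="cuda")
    out, = call([x])
    expected = torch.where(x <= 0.05842519234815202,
                           torch.tensor(0.7612752914428711, device="cuda"), x)
    torch.testing.assert_close(out, expected)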
repo_name: dawnclaude/onnx2keras
module_name: LayerThreshold
synthetic: false
uuid: 15149
licenses: ["MIT"]
stars: 115
sha: 3d2a47c0a228b91fd434232274e216e491da36e3
repo_link: https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3

entry_point: LayerReLU6
original_triton_code:

# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/36/c36w3rtlopsrs3btjlizo5n4q3ytar7mtz2eoqnft735ksvzltlx.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.hardtanh]
# Source node to ATen node mapping:
#   x => clamp_max, clamp_min
# Graph fragment:
#   %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0.0), kwargs = {})
#   %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6.0), kwargs = {})
triton_poi_fused_hardtanh_0 = async_compile.triton('triton_poi_fused_hardtanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 6.0
    tmp4 = triton_helpers.minimum(tmp2, tmp3)
    tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.hardtanh]
        stream0 = get_raw_stream(0)
        triton_poi_fused_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn


class LayerReLU6(nn.Module):
    """
    Test for nn.layers based types
    """

    def __init__(self):
        super(LayerReLU6, self).__init__()
        self.relu = nn.ReLU6()

    def forward(self, x):
        x = self.relu(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 6.0
    tmp4 = triton_helpers.minimum(tmp2, tmp3)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_hardtanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class LayerReLU6New(nn.Module):
    """
    Test for nn.layers based types
    """

    def __init__(self):
        super(LayerReLU6New, self).__init__()
        self.relu = nn.ReLU6()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
repo_name: dawnclaude/onnx2keras
module_name: LayerReLU6
synthetic: false
uuid: 15150
licenses: ["MIT"]
stars: 115
sha: 3d2a47c0a228b91fd434232274e216e491da36e3
repo_link: https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3

entry_point: LayerHardtanh
original_triton_code:

# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/wy/cwysx3okemxvtpju3qjlkivlv5pzbcj6wphpmq562kp4jzipxtpy.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.hardtanh]
# Source node to ATen node mapping:
#   x => clamp_max, clamp_min
# Graph fragment:
#   %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0.6295403873549253), kwargs = {})
#   %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.1090100075602378), kwargs = {})
triton_poi_fused_hardtanh_0 = async_compile.triton('triton_poi_fused_hardtanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.6295403873549253
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 1.1090100075602378
    tmp4 = triton_helpers.minimum(tmp2, tmp3)
    tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.hardtanh]
        stream0 = get_raw_stream(0)
        triton_poi_fused_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import random
import torch
import torch.nn as nn


class LayerHardtanh(nn.Module):
    """
    Test for nn.layers based types
    """

    def __init__(self):
        super(LayerHardtanh, self).__init__()
        self.min_val = random.random()
        self.max_val = self.min_val + random.random()
        self.htanh = nn.Hardtanh(min_val=self.min_val, max_val=self.max_val)

    def forward(self, x):
        x = self.htanh(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import random
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.6295403873549253
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 1.1090100075602378
    tmp4 = triton_helpers.minimum(tmp2, tmp3)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_hardtanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class LayerHardtanhNew(nn.Module):
    """
    Test for nn.layers based types
    """

    def __init__(self):
        super(LayerHardtanhNew, self).__init__()
        self.min_val = random.random()
        self.max_val = self.min_val + random.random()
        self.htanh = nn.Hardtanh(min_val=self.min_val, max_val=self.max_val)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
repo_name: dawnclaude/onnx2keras
module_name: LayerHardtanh
synthetic: false
uuid: 15151
licenses: ["MIT"]
stars: 115
sha: 3d2a47c0a228b91fd434232274e216e491da36e3
repo_link: https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3

entry_point: ffnn
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/v6/cv6odvhmmcyvquog4eo62pdliew53orxzwe2wfzampr64jy3ppa7.py
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   outputs => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_2, out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
        del primals_3
    return (buf1, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn


def get_shape(t):
    return list(t.shape)


class ffnn(nn.Module):

    def __init__(self, emb_size, num_layers, hidden_size, output_size,
                 dropout, output_weights_initializer=None):
        super(ffnn, self).__init__()
        self.dropout = torch.nn.Dropout(p=dropout)
        self.weights = nn.Parameter(torch.Tensor(emb_size, output_size))
        self.bias = nn.Parameter(torch.Tensor(output_size))
        self.activation = torch.nn.ReLU()
        self.num_layers = num_layers
        self.emb_size = emb_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.initializer = output_weights_initializer
        self.initialize()

    def initialize(self):
        if self.initializer is None:
            torch.nn.init.xavier_uniform_(self.weights, gain=1)
        else:
            self.initializer(self.weights, gain=1)
        nn.init.zeros_(self.bias)

    def forward(self, inputs):
        current_inputs = inputs
        if len(get_shape(inputs)) == 3:
            batch_size, seqlen, emb_size = get_shape(inputs)
            current_inputs = inputs.reshape(batch_size * seqlen, emb_size)
        emb_size = get_shape(current_inputs)[-1]
        assert emb_size == self.emb_size, 'last dim of input does not match this layer'
        outputs = current_inputs.matmul(self.weights) + self.bias
        if len(get_shape(inputs)) == 3:
            outputs = outputs.reshape(batch_size, seqlen, self.output_size)
        return outputs


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'emb_size': 4, 'num_layers': 1, 'hidden_size': 4,
                 'output_size': 4, 'dropout': 0.5}]
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_2, out=buf0)
        del primals_2
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
    return buf1, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)


def get_shape(t):
    return list(t.shape)


class ffnnNew(nn.Module):

    def __init__(self, emb_size, num_layers, hidden_size, output_size,
                 dropout, output_weights_initializer=None):
        super(ffnnNew, self).__init__()
        self.dropout = torch.nn.Dropout(p=dropout)
        self.weights = nn.Parameter(torch.Tensor(emb_size, output_size))
        self.bias = nn.Parameter(torch.Tensor(output_size))
        self.activation = torch.nn.ReLU()
        self.num_layers = num_layers
        self.emb_size = emb_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.initializer = output_weights_initializer
        self.initialize()

    def initialize(self):
        if self.initializer is None:
            torch.nn.init.xavier_uniform_(self.weights, gain=1)
        else:
            self.initializer(self.weights, gain=1)
        nn.init.zeros_(self.bias)

    def forward(self, input_0):
        primals_2 = self.weights
        primals_3 = self.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
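A hedged sanity sketch for this row (an editorial addition, assumes a CUDA device): the Inductor graph above is just an extern mm on the flattened input followed by a fused bias add, so the compiled module should agree with the plain matmul-plus-bias reference.

    import torch

    m = ffnnNew(emb_size=4, num_layers=1, hidden_size=4, output_size=4, dropout=0.5).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    ref = x.reshape(64, 4).matmul(m.weights).add(m.bias).reshape(4, 4, 4, 4)
    torch.testing.assert_close(m(x), ref)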
repo_name: db-bionlp/CLNER
module_name: ffnn
synthetic: false
uuid: 15152
licenses: ["MIT"]
stars: 46
sha: 77910311acf0411252b9fea8c3e6efb7175eb21f
repo_link: https://github.com/db-bionlp/CLNER/tree/77910311acf0411252b9fea8c3e6efb7175eb21f

entry_point: LayerELU
original_triton_code:

# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/si/csit76acj4nxc2ickliwq6ukjbyi5segckn6n3yseelkf53uppgc.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu]
# Source node to ATen node mapping:
#   x => expm1, gt, mul, mul_1, mul_2, where
# Graph fragment:
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
#   %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_1,), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 0.44698668752580917), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = 0.44698668752580917
    tmp7 = tmp5 * tmp6
    tmp8 = tl.where(tmp2, tmp4, tmp7)
    tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_elu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import random
import torch
import torch.nn as nn


class LayerELU(nn.Module):
    """
    Test for nn.layers based types
    """

    def __init__(self):
        super(LayerELU, self).__init__()
        self.alpha = random.random()
        self.elu = nn.ELU(alpha=self.alpha)

    def forward(self, x):
        x = self.elu(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import random
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = 0.44698668752580917
    tmp7 = tmp5 * tmp6
    tmp8 = tl.where(tmp2, tmp4, tmp7)
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_elu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class LayerELUNew(nn.Module):
    """
    Test for nn.layers based types
    """

    def __init__(self):
        super(LayerELUNew, self).__init__()
        self.alpha = random.random()
        self.elu = nn.ELU(alpha=self.alpha)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
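A hedged check for this row (an editorial addition, assumes a CUDA device): since where(x > 0, x, alpha * expm1(x)) is exactly ELU, the kernel with its baked-in constant should match F.elu with alpha = 0.44698668752580917.

    import torch
    import torch.nn.functional as F

    x = torch.randn(4, 4, 4, 4, device="cuda")  # randn exercises both branches
    out, = call([x])
    torch.testing.assert_close(out, F.elu(x, alpha=0.44698668752580917))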
repo_name: dawnclaude/onnx2keras
module_name: LayerELU
synthetic: false
uuid: 15153
licenses: ["MIT"]
stars: 115
sha: 3d2a47c0a228b91fd434232274e216e491da36e3
repo_link: https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3

entry_point: VoxelFeatureExtractor
original_triton_code:

# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/f2/cf2faolme3wnnwp6icjm6dfbqfd7sk3y4me6nfknaftn55eci6dj.py
# Topologically Sorted Source Nodes: [sum_1, truediv], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
#   sum_1 => sum_1
#   truediv => div
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [1]), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %view), kwargs = {})
triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 / tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [sum_1, truediv], Original ATen: [aten.sum, aten.div]
        stream0 = get_raw_stream(0)
        triton_poi_fused_div_sum_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
from torch import nn


class VoxelFeatureExtractor(nn.Module):
    """Computes mean of non-zero points within voxel."""

    def forward(self, feature, occupancy):
        """
        :feature FloatTensor of shape (N, K, C)
        :return FloatTensor of shape (N, C)
        """
        denominator = occupancy.type_as(feature).view(-1, 1)
        feature = (feature.sum(1) / denominator).contiguous()
        return feature


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_sum_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class VoxelFeatureExtractorNew(nn.Module):
    """Computes mean of non-zero points within voxel."""

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
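A hedged check for this row (an editorial addition, assumes a CUDA device), driving `call` directly: reading the kernel above, the first element of `args` feeds the divisor (occupancy, flattened by the view) and the second supplies the features summed over their last dimension.

    import torch

    occupancy = torch.rand(4, 4, device="cuda")
    feature = torch.rand(4, 4, device="cuda")
    out, = call([occupancy, feature])
    torch.testing.assert_close(out, feature.sum(1) / occupancy.view(-1, 1))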
repo_name: dd-iuonac/vision3d
module_name: VoxelFeatureExtractor
synthetic: false
uuid: 15154
licenses: ["MIT"]
stars: 131
sha: 9ea514c80eb99d265c3247321e59bfc1c2ccd94a
repo_link: https://github.com/dd-iuonac/vision3d/tree/9ea514c80eb99d265c3247321e59bfc1c2ccd94a

entry_point: ScalarMix
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/2d/c2durcbnmxziv6ccmw4s5yb6zlayyf7itflrryxft2ddkko7o6q4.py
# Topologically Sorted Source Nodes: [mul, weighted_sum, mul_1], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
#   mul => mul
#   mul_1 => mul_1
#   weighted_sum => add
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %add), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (0))
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = tl.load(in_ptr1 + (0))
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp7 = tl.load(in_ptr2 + (x0), xmask)
    tmp4 = tmp3 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp5 / tmp5
    tmp8 = tmp6 * tmp7
    tmp9 = 0.0
    tmp10 = tmp8 + tmp9
    tmp11 = tmp1 * tmp10
    tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, ), (1, ))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul, weighted_sum, mul_1], Original ATen: [aten.mul, aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mul_0.run(primals_3, primals_1, primals_2, buf0, 64, grid=grid(64), stream=stream0)
    return (buf0, primals_1, primals_3, reinterpret_tensor(primals_2, (4, 4, 4), (16, 4, 1), 0), )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn


class ScalarMix(nn.Module):

    def __init__(self, n_layers, dropout=0):
        super(ScalarMix, self).__init__()
        self.n_layers = n_layers
        self.dropout = dropout
        self.weights = nn.Parameter(torch.zeros(n_layers))
        self.gamma = nn.Parameter(torch.tensor([1.0]))
        self.dropout = nn.Dropout(dropout)

    def extra_repr(self):
        s = f'n_layers={self.n_layers}'
        if self.dropout.p > 0:
            s += f', dropout={self.dropout.p}'
        return s

    def forward(self, tensors):
        normed_weights = self.dropout(self.weights.softmax(-1))
        weighted_sum = sum(w * h for w, h in zip(normed_weights, tensors))
        return self.gamma * weighted_sum


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_layers': 1}]
triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = tl.load(in_ptr1 + 0)
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp7 = tl.load(in_ptr2 + x0, xmask)
    tmp4 = tmp3 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp5 / tmp5
    tmp8 = tmp6 * tmp7
    tmp9 = 0.0
    tmp10 = tmp8 + tmp9
    tmp11 = tmp1 * tmp10
    tl.store(out_ptr0 + x0, tmp11, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(64)](primals_3, primals_1, primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return buf0, primals_1, primals_3, reinterpret_tensor(primals_2, (4, 4, 4), (16, 4, 1), 0)


class ScalarMixNew(nn.Module):

    def __init__(self, n_layers, dropout=0):
        super(ScalarMixNew, self).__init__()
        self.n_layers = n_layers
        self.dropout = dropout
        self.weights = nn.Parameter(torch.zeros(n_layers))
        self.gamma = nn.Parameter(torch.tensor([1.0]))
        self.dropout = nn.Dropout(dropout)

    def extra_repr(self):
        s = f'n_layers={self.n_layers}'
        if self.dropout.p > 0:
            s += f', dropout={self.dropout.p}'
        return s

    def forward(self, input_0):
        primals_1 = self.weights
        primals_3 = self.gamma
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
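A hedged check for this row (an editorial addition, assumes a CUDA device): with n_layers=1 the softmax over a single zero weight is exactly 1, and the original forward zips the weights against the first dimension of the input, so the mix collapses to gamma * tensors[0]; the kernel bakes that in.

    import torch

    m = ScalarMixNew(n_layers=1).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    torch.testing.assert_close(m(x), m.gamma * x[0])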
repo_name: db-bionlp/CLNER
module_name: ScalarMix
synthetic: false
uuid: 15155
licenses: ["MIT"]
stars: 46
sha: 77910311acf0411252b9fea8c3e6efb7175eb21f
repo_link: https://github.com/db-bionlp/CLNER/tree/77910311acf0411252b9fea8c3e6efb7175eb21f

entry_point: EmissionModel
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/le/clewmq2oyakpojeemfsrrjq5tneb2unj5om75r32lnu3wfwo4lbd.py
# Topologically Sorted Source Nodes: [emission_matrix], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
#   emission_matrix => amax, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%primals_1, [1], True), kwargs = {})
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/cy/ccyycaxchymjoxhltszs3dbtn4c52uyfl2bfed3ws7c3ovgyk3ng.py
# Topologically Sorted Source Nodes: [emission_matrix, getitem], Original ATen: [aten._log_softmax, aten.index]
# Source node to ATen node mapping:
#   emission_matrix => exp, log, sub_1, sum_1
#   getitem => index
# Graph fragment:
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
#   %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%sub_1, [None, %primals_2]), kwargs = {})
triton_poi_fused__log_softmax_index_1 = async_compile.triton('triton_poi_fused__log_softmax_index_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_index_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
    tmp6 = tl.load(in_ptr1 + (tmp4 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp13 = tl_math.exp(tmp12)
    tmp14 = tmp11 + tmp13
    tmp16 = tl_math.exp(tmp15)
    tmp17 = tmp14 + tmp16
    tmp18 = tl_math.log(tmp17)
    tmp19 = tmp6 - tmp18
    tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [emission_matrix], Original ATen: [aten._log_softmax]
        stream0 = get_raw_stream(0)
        triton_poi_fused__log_softmax_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [emission_matrix, getitem], Original ATen: [aten._log_softmax, aten.index]
        triton_poi_fused__log_softmax_index_1.run(primals_2, buf0, buf1, 16, grid=grid(16), stream=stream0)
        del buf0
    return (reinterpret_tensor(buf1, (4, 4), (1, 4), 0), primals_1, primals_2, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
    fn = lambda: call([primals_1, primals_2])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data class EmissionModel(torch.nn.Module): """ - forward(): computes the log probability of an observation. - sample(): given a state, sample an observation for that state. """ def __init__(self, N, M): super(EmissionModel, self).__init__() self.N = N self.M = M self.unnormalized_emission_matrix = torch.nn.Parameter(torch.randn( N, M)) def forward(self, x_t): """ x_t : LongTensor of shape (batch size) Get observation probabilities """ emission_matrix = torch.nn.functional.log_softmax(self. unnormalized_emission_matrix, dim=1) out = emission_matrix[:, x_t].transpose(0, 1) return out def get_inputs(): return [torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'N': 4, 'M': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp8 = tl_math.exp(tmp7) tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp13 = tl_math.exp(tmp12) tmp14 = tmp11 + tmp13 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tl_math.log(tmp17) tmp19 = tmp6 - tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_index_1[grid(16)](primals_2, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 return reinterpret_tensor(buf1, (4, 4), (1, 4), 0), primals_1, primals_2 class EmissionModelNew(torch.nn.Module): """ - forward(): computes the log probability of an observation. - sample(): given a state, sample an observation for that state. 
""" def __init__(self, N, M): super(EmissionModelNew, self).__init__() self.N = N self.M = M self.unnormalized_emission_matrix = torch.nn.Parameter(torch.randn( N, M)) def forward(self, input_0): primals_1 = self.unnormalized_emission_matrix primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
dendisuhubdy/pytorch_HMM
EmissionModel
false
15156
[ "Apache-2.0" ]
88
3235326027328e1b0377b17f9dad8fcc56a3668c
https://github.com/dendisuhubdy/pytorch_HMM/tree/3235326027328e1b0377b17f9dad8fcc56a3668c
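For reference, a minimal eager-mode sketch of what the two fused kernels above compute for EmissionModel (an illustration added here, not code from the repo; it assumes a CUDA device and the shapes from get_inputs()/get_init_inputs()): a numerically stable row-wise log_softmax split across the two kernels, then an integer gather and a stride-level transpose.

import torch

N, M = 4, 4
weights = torch.randn(N, M, device='cuda')             # stands in for unnormalized_emission_matrix
x_t = torch.ones(4, dtype=torch.int64, device='cuda')  # matches get_inputs()

# triton_poi_fused__log_softmax_0: subtract the per-row max for stability.
# triton_poi_fused__log_softmax_index_1: gather columns by x_t and apply the
# log-sum-exp correction in the same pass.
emission_matrix = torch.log_softmax(weights, dim=1)
out = emission_matrix[:, x_t].transpose(0, 1)          # the compiled call returns this view via reinterpret_tensor
print(out.shape)                                       # torch.Size([4, 4])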
BiaffineAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qt/cqtn54x53z5e5qza53u36hhsftfflvdqwoao2xnj7xnw5ocdcx3f.py # Topologically Sorted Source Nodes: [add, add_1, output_2, output_3], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # add => add # add_1 => add_1 # output_2 => add_2 # output_3 => amax # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_13, %view_5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %permute_3), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_10), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_2, [1], True), kwargs = {}) triton_poi_fused__softmax_add_0 = async_compile.triton('triton_poi_fused__softmax_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0), xmask) tmp5 = tl.load(in_ptr3 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp8 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp4 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp10 + tmp3 tmp12 = tmp11 + tmp6 tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp16 = tmp14 + tmp15 tmp17 = tmp16 + tmp3 tmp18 = tmp17 + tmp6 tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp22 = tmp20 + tmp21 tmp23 = tmp22 + tmp3 tmp24 = tmp23 + tmp6 tmp25 = triton_helpers.maximum(tmp19, tmp24) tl.store(out_ptr0 + (x0), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ia/ciady5wligt2qz2veuk2j2mq4euwgkz2msjneq6me7apizr4flue.py # Topologically Sorted Source Nodes: [add, add_1, output_2, output_3], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # add => add # add_1 => add_1 # output_2 => add_2 # output_3 => exp, sub # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_13, %view_5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %permute_3), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_10), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp4 + tmp6 tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tl.store(in_out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py # Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._softmax] # Source node to ATen node mapping: # output_3 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + 
(3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (150, 4), (4, 1)) assert_size_stride(primals_4, (150, ), (1, )) assert_size_stride(primals_5, (150, 4), (4, 1)) assert_size_stride(primals_6, (150, ), (1, )) assert_size_stride(primals_7, (1, 150), (150, 1)) assert_size_stride(primals_8, (1, 150), (150, 1)) assert_size_stride(primals_9, (1, 150, 150), (22500, 150, 1)) assert_size_stride(primals_10, (1, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 150), (150, 1), torch.float32) # Topologically Sorted Source Nodes: [input_s], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, primals_1, reinterpret_tensor(primals_3, (4, 150), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((16, 150), (150, 1), torch.float32) # Topologically Sorted Source Nodes: [input_t], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 150), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(primals_7, reinterpret_tensor(buf0, (150, 4), (1, 150), 0), out=buf2) buf3 = empty_strided_cuda((1, 1, 16), (16, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [out_d], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_8, (1, 1, 150), (150, 150, 1), 0), reinterpret_tensor(buf1, (1, 150, 16), (0, 1, 150), 0), out=buf3) buf4 = empty_strided_cuda((1, 4, 150), (600, 150, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (1, 4, 150), (600, 150, 1), 0), primals_9, out=buf4) buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 150), (150, 150, 1), 0), reinterpret_tensor(buf1, (4, 150, 4), (600, 1, 150), 0), out=buf5) buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [add, add_1, output_2, output_3], Original ATen: [aten.add, aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_add_0.run(buf5, buf3, buf2, primals_10, buf6, 4, grid=grid(4), stream=stream0) buf7 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [add, add_1, output_2, output_3], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_1.run(buf7, buf5, buf2, primals_10, buf6, 16, grid=grid(16), stream=stream0) del buf2 del buf6 del primals_10 buf8 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf7, buf8, 16, grid=grid(16), stream=stream0) del buf7 return (buf8, primals_1, primals_7, buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf1, buf8, 
reinterpret_tensor(buf4, (4, 150, 1), (150, 1, 150), 0), reinterpret_tensor(primals_9, (1, 150, 150), (22500, 1, 150), 0), reinterpret_tensor(primals_8, (1, 150, 1), (150, 1, 150), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((150, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((150, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((150, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((150, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 150), (150, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 150), (150, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, 150, 150), (22500, 150, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data.dataloader from torch.nn import Parameter from torch.nn.parameter import Parameter import torch.nn class BiaffineAttention(nn.Module): """ Adopted from NeuroNLP2: https://github.com/XuezheMax/NeuroNLP2/blob/master/neuronlp2/nn/modules/attention.py Bi-Affine attention layer. """ def __init__(self, input_size_encoder, input_size_decoder, hidden_size= 150, num_labels=1, biaffine=True, **kwargs): """ Args: input_size_encoder: int the dimension of the encoder input. input_size_decoder: int the dimension of the decoder input. num_labels: int the number of labels of the crf layer biaffine: bool if apply bi-affine parameter. **kwargs: """ super(BiaffineAttention, self).__init__() self.input_size_encoder = input_size_encoder self.input_size_decoder = input_size_decoder self.hidden_size = hidden_size self.linear_encoder = torch.nn.Linear(self.input_size_encoder, self .hidden_size) self.linear_decoder = torch.nn.Linear(self.input_size_decoder, self .hidden_size) self.num_labels = num_labels self.biaffine = biaffine self.W_d = Parameter(torch.Tensor(self.num_labels, self.hidden_size)) self.W_e = Parameter(torch.Tensor(self.num_labels, self.hidden_size)) self.b = Parameter(torch.Tensor(1, self.num_labels)) if self.biaffine: self.U = Parameter(torch.Tensor(self.num_labels, self. hidden_size, self.hidden_size)) else: self.register_parameter('U', None) self.reset_parameters() def reset_parameters(self): nn.init.xavier_normal_(self.W_d) nn.init.xavier_normal_(self.W_e) nn.init.constant_(self.b, 0.0) if self.biaffine: nn.init.xavier_normal_(self.U) def forward(self, input_s, input_t, mask_d=None, mask_e=None): """ Args: input_s: Tensor the student input tensor with shape = [batch, input_size] input_t: Tensor the teacher input tensor with shape = [batch, num_teachers, input_size] mask_d: None mask_e: None Returns: Tensor the energy tensor with shape = [batch, length] """ assert input_s.size(0) == input_t.size(0 ), 'batch sizes of encoder and decoder are requires to be equal.' input_s.size() _, _num_teachers, _ = input_t.size() input_s = self.linear_encoder(input_s) input_t = self.linear_decoder(input_t) out_e = torch.matmul(self.W_e, input_s.transpose(1, 0)).transpose(1, 0) out_d = torch.einsum('nd,bnd->bn', self.W_d, input_t) if self.biaffine: output = torch.einsum('bd,nde->bne', input_s, self.U) output = torch.einsum('bne,bne->bn', output, input_t) output = output + out_d + out_e + self.b else: output = out_d + out_d + self.b if mask_d is not None and mask_e is not None: output = output * mask_d.unsqueeze(1).unsqueeze(3 ) * mask_e.unsqueeze(1).unsqueeze(2) output = torch.nn.functional.softmax(output, 1) return output def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size_encoder': 4, 'input_size_decoder': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data.dataloader from torch.nn import Parameter from torch.nn.parameter import Parameter import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_add_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x0, xmask) tmp5 = tl.load(in_ptr3 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp4 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp10 + tmp3 tmp12 = tmp11 + tmp6 tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp16 = tmp14 + tmp15 tmp17 = tmp16 + tmp3 tmp18 = tmp17 + tmp6 tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp22 = tmp20 + tmp21 tmp23 = tmp22 + tmp3 tmp24 = tmp23 + tmp6 tmp25 = triton_helpers.maximum(tmp19, tmp24) tl.store(out_ptr0 + x0, tmp25, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp4 + tmp6 tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tl.store(in_out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, 
primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (150, 4), (4, 1)) assert_size_stride(primals_4, (150,), (1,)) assert_size_stride(primals_5, (150, 4), (4, 1)) assert_size_stride(primals_6, (150,), (1,)) assert_size_stride(primals_7, (1, 150), (150, 1)) assert_size_stride(primals_8, (1, 150), (150, 1)) assert_size_stride(primals_9, (1, 150, 150), (22500, 150, 1)) assert_size_stride(primals_10, (1, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 150), (150, 1), torch.float32) extern_kernels.addmm(primals_4, primals_1, reinterpret_tensor( primals_3, (4, 150), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((16, 150), (150, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 150), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(primals_7, reinterpret_tensor(buf0, (150, 4), (1, 150), 0), out=buf2) buf3 = empty_strided_cuda((1, 1, 16), (16, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_8, (1, 1, 150), (150, 150, 1), 0), reinterpret_tensor(buf1, (1, 150, 16), (0, 1, 150), 0), out=buf3) buf4 = empty_strided_cuda((1, 4, 150), (600, 150, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (1, 4, 150), (600, 150, 1), 0), primals_9, out=buf4) buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 150), (150, 150, 1), 0), reinterpret_tensor(buf1, (4, 150, 4), (600, 1, 150), 0), out=buf5) buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_add_0[grid(4)](buf5, buf3, buf2, primals_10, buf6, 4, XBLOCK=4, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0) del buf3 triton_poi_fused__softmax_add_1[grid(16)](buf7, buf5, buf2, primals_10, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 del buf6 del primals_10 buf8 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0) del buf5 triton_poi_fused__softmax_2[grid(16)](buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf7 return buf8, primals_1, primals_7, buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf1, buf8, reinterpret_tensor(buf4, (4, 150, 1), (150, 1, 150), 0), reinterpret_tensor(primals_9, (1, 150, 150), (22500, 1, 150), 0), reinterpret_tensor(primals_8, (1, 150, 1), ( 150, 1, 150), 0) class BiaffineAttentionNew(nn.Module): """ Adopted from NeuroNLP2: https://github.com/XuezheMax/NeuroNLP2/blob/master/neuronlp2/nn/modules/attention.py Bi-Affine attention layer. """ def __init__(self, input_size_encoder, input_size_decoder, hidden_size= 150, num_labels=1, biaffine=True, **kwargs): """ Args: input_size_encoder: int the dimension of the encoder input. input_size_decoder: int the dimension of the decoder input. num_labels: int the number of labels of the crf layer biaffine: bool if apply bi-affine parameter. 
**kwargs: """ super(BiaffineAttentionNew, self).__init__() self.input_size_encoder = input_size_encoder self.input_size_decoder = input_size_decoder self.hidden_size = hidden_size self.linear_encoder = torch.nn.Linear(self.input_size_encoder, self .hidden_size) self.linear_decoder = torch.nn.Linear(self.input_size_decoder, self .hidden_size) self.num_labels = num_labels self.biaffine = biaffine self.W_d = Parameter(torch.Tensor(self.num_labels, self.hidden_size)) self.W_e = Parameter(torch.Tensor(self.num_labels, self.hidden_size)) self.b = Parameter(torch.Tensor(1, self.num_labels)) if self.biaffine: self.U = Parameter(torch.Tensor(self.num_labels, self. hidden_size, self.hidden_size)) else: self.register_parameter('U', None) self.reset_parameters() def reset_parameters(self): nn.init.xavier_normal_(self.W_d) nn.init.xavier_normal_(self.W_e) nn.init.constant_(self.b, 0.0) if self.biaffine: nn.init.xavier_normal_(self.U) def forward(self, input_0, input_1): primals_7 = self.W_d primals_8 = self.W_e primals_10 = self.b primals_9 = self.U primals_3 = self.linear_encoder.weight primals_4 = self.linear_encoder.bias primals_5 = self.linear_decoder.weight primals_6 = self.linear_decoder.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
db-bionlp/CLNER
BiaffineAttention
false
15157
[ "MIT" ]
46
77910311acf0411252b9fea8c3e6efb7175eb21f
https://github.com/db-bionlp/CLNER/tree/77910311acf0411252b9fea8c3e6efb7175eb21f
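For reference, a hedged eager-mode sketch of the biaffine=True path that the kernels above fuse (not the repo's own test code; parameter names mirror the module, shapes follow get_inputs(), and random stand-ins replace the Xavier-initialized parameters).

import torch

batch, num_teachers, dim, hidden = 4, 4, 4, 150
input_s = torch.rand(batch, dim)                       # primals_1
input_t = torch.rand(batch, num_teachers, dim)         # primals_2
lin_enc = torch.nn.Linear(dim, hidden)                 # linear_encoder
lin_dec = torch.nn.Linear(dim, hidden)                 # linear_decoder
W_d, W_e = torch.randn(1, hidden), torch.randn(1, hidden)
U, b = torch.randn(1, hidden, hidden), torch.zeros(1, 1)

s = lin_enc(input_s)                                   # addmm -> buf0
t = lin_dec(input_t)                                   # addmm -> buf1
out_e = torch.matmul(W_e, s.transpose(1, 0)).transpose(1, 0)  # mm -> buf2
out_d = torch.einsum('nd,bnd->bn', W_d, t)             # bmm -> buf3 (the size-1 label n broadcasts)
output = torch.einsum('bd,nde->bne', s, U)             # bmm -> buf4
output = torch.einsum('bne,bne->bn', output, t)        # bmm -> buf5
energy = torch.softmax(output + out_d + out_e + b, dim=1)  # the three fused _softmax_add kernels
print(energy.shape)                                    # torch.Size([4, 4])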
HDRLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qb/cqbfnmlt6svijulw225qiycrjo6hdtli6ovr4b3x5j67ws4i7vkk.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%view,), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = 
tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 0.01 tmp5 = tmp0 + tmp4 tmp6 = tmp5 * tmp5 tmp7 = tmp3 / tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class HDRLoss(nn.Module): """High dynamic range loss.""" def __init__(self, eps=0.01): """Initializes loss with numerical stability epsilon.""" super(HDRLoss, self).__init__() self._eps = eps def forward(self, denoised, target): """Computes loss by unpacking render buffer.""" loss = (denoised - target) ** 2 / (denoised + self._eps) ** 2 return torch.mean(loss.view(-1)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 0.01 tmp5 = tmp0 + tmp4 tmp6 = tmp5 * tmp5 tmp7 = tmp3 / tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class HDRLossNew(nn.Module): """High dynamic range loss.""" def __init__(self, eps=0.01): """Initializes loss with numerical stability epsilon.""" super(HDRLossNew, self).__init__() self._eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
delldu/Noise2Noise
HDRLoss
false
15158
[ "MIT" ]
224
f519f208776a60efadac208c109c9b7f432504b5
https://github.com/delldu/Noise2Noise/tree/f519f208776a60efadac208c109c9b7f432504b5
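For reference, the eager computation that the single persistent-reduction kernel above performs in one pass (a sketch with random inputs, not repo code; the literals 0.01 and 256.0 in the kernel are the default eps and the element count of the 4x4x4x4 inputs).

import torch

denoised = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
eps = 0.01                                             # tmp4 in the kernel

# Squared relative error, averaged over all 256 elements (the kernel's
# tl.sum followed by division by 256.0).
loss = ((denoised - target) ** 2 / (denoised + eps) ** 2).view(-1).mean()
print(loss.item())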
Conv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.distributions import transforms as transform class Flow(transform.Transform, nn.Module): """ Main class for a single flow. """ def __init__(self, amortized='none'): """ Initialize as both transform and module """ transform.Transform.__init__(self) nn.Module.__init__(self) self.amortized = amortized def init_parameters(self): """ Initialize internal parameters (sub-modules) """ for param in self.parameters(): param.data.uniform_(-0.001, 0.001) def __hash__(self): """ Dirty hack to ensure nn.Module compatibility """ return nn.Module.__hash__(self) def set_parameters(self, params, batch_dim): """ Set parameters values (sub-modules) """ pass def n_parameters(self): """ Return number of parameters in flow """ return 0 class ActNormFlow(Flow): """ An implementation of the activation normalization layer defined in Glow: Generative Flow with Invertible 1x1 Convolutions (https://arxiv.org/abs/1807.03039). """ def __init__(self, dim, amortized='none'): super(ActNormFlow, self).__init__() self.weight = [] self.bias = [] self.amortized = amortized self.weight = amortized_ones(amortized, (1, dim, 1, 1)) self.bias = amortized_zeros(amortized, (1, dim, 1, 1)) self.initialized = False self.dim = dim def _call(self, z): return z * torch.exp(self.weight) + self.bias def _inverse(self, z): return (z - self.bias) * torch.exp(-self.weight) def log_abs_det_jacobian(self, z): if self.initialized is False: self.bias.data.copy_(z.mean((0, 2, 3), keepdim=True) * -1) self.weight.data.copy_(torch.log(1.0 / (torch.sqrt(((z + self. bias.data) ** 2).mean((0, 2, 3), keepdim=True)) + 1e-06))) self.initialized = True return torch.sum(self.weight).repeat(z.shape[0], 1) * z.shape[2 ] * z.shape[3] def set_parameters(self, params, batch_dim): """ Set parameters values (sub-modules) """ if self.amortized != 'none': self.weight = params[:, :self.dim ** 2] self.bias = params[:, self.dim ** 2:self.dim ** 2 * 2] def n_parameters(self): """ Return number of parameters in flow """ return self.dim * 2 class Conv2d(nn.Conv2d): pad_dict = {'same': lambda kernel, stride: [(((k - 1) * s + 1) // 2) for k, s in zip(kernel, stride)], 'valid': lambda kernel, stride: [(0) for _ in kernel]} @staticmethod def get_padding(padding, kernel_size, stride): if isinstance(padding, str): if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] if isinstance(stride, int): stride = [stride, stride] padding = padding.lower() try: padding = Conv2d.pad_dict[padding](kernel_size, stride) except KeyError: raise ValueError('{} is not supported'.format(padding)) return padding def __init__(self, in_channels, out_channels, kernel_size=[3, 3], stride=[1, 1], padding='same', do_actnorm=False, weight_std=0.001): padding = Conv2d.get_padding(padding, kernel_size, stride) super().__init__(in_channels, out_channels, kernel_size, stride, padding, bias=not do_actnorm) self.weight.data.normal_(mean=0.0, std=weight_std) if not do_actnorm: self.bias.data.zero_() else: self.actnorm = ActNormFlow(out_channels) self.do_actnorm = do_actnorm def forward(self, input): x = super().forward(input) if self.do_actnorm: x = self.actnorm(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.distributions import transforms as transform assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class Flow(transform.Transform, nn.Module): """ Main class for a single flow. """ def __init__(self, amortized='none'): """ Initialize as both transform and module """ transform.Transform.__init__(self) nn.Module.__init__(self) self.amortized = amortized def init_parameters(self): """ Initialize internal parameters (sub-modules) """ for param in self.parameters(): param.data.uniform_(-0.001, 0.001) def __hash__(self): """ Dirty hack to ensure nn.Module compatibility """ return nn.Module.__hash__(self) def set_parameters(self, params, batch_dim): """ Set parameters values (sub-modules) """ pass def n_parameters(self): """ Return number of parameters in flow """ return 0 class ActNormFlow(Flow): """ An implementation of the activation normalization layer defined in Glow: Generative Flow with Invertible 1x1 Convolutions (https://arxiv.org/abs/1807.03039). """ def __init__(self, dim, amortized='none'): super(ActNormFlow, self).__init__() self.weight = [] self.bias = [] self.amortized = amortized self.weight = amortized_ones(amortized, (1, dim, 1, 1)) self.bias = amortized_zeros(amortized, (1, dim, 1, 1)) self.initialized = False self.dim = dim def _call(self, z): return z * torch.exp(self.weight) + self.bias def _inverse(self, z): return (z - self.bias) * torch.exp(-self.weight) def log_abs_det_jacobian(self, z): if self.initialized is False: self.bias.data.copy_(z.mean((0, 2, 3), keepdim=True) * -1) self.weight.data.copy_(torch.log(1.0 / (torch.sqrt(((z + self. 
bias.data) ** 2).mean((0, 2, 3), keepdim=True)) + 1e-06))) self.initialized = True return torch.sum(self.weight).repeat(z.shape[0], 1) * z.shape[2 ] * z.shape[3] def set_parameters(self, params, batch_dim): """ Set parameters values (sub-modules) """ if self.amortized != 'none': self.weight = params[:, :self.dim ** 2] self.bias = params[:, self.dim ** 2:self.dim ** 2 * 2] def n_parameters(self): """ Return number of parameters in flow """ return self.dim * 2 class Conv2dNew(nn.Conv2d): pad_dict = {'same': lambda kernel, stride: [(((k - 1) * s + 1) // 2) for k, s in zip(kernel, stride)], 'valid': lambda kernel, stride: [(0) for _ in kernel]} @staticmethod def get_padding(padding, kernel_size, stride): if isinstance(padding, str): if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] if isinstance(stride, int): stride = [stride, stride] padding = padding.lower() try: padding = Conv2dNew.pad_dict[padding](kernel_size, stride) except KeyError: raise ValueError('{} is not supported'.format(padding)) return padding def __init__(self, in_channels, out_channels, kernel_size=[3, 3], stride=[1, 1], padding='same', do_actnorm=False, weight_std=0.001): padding = Conv2dNew.get_padding(padding, kernel_size, stride) super().__init__(in_channels, out_channels, kernel_size, stride, padding, bias=not do_actnorm) self.weight.data.normal_(mean=0.0, std=weight_std) if not do_actnorm: self.bias.data.zero_() else: self.actnorm = ActNormFlow(out_channels) self.do_actnorm = do_actnorm def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dendisuhubdy/flow_synthesizer
Conv2d
false
15159
[ "MIT" ]
93
1561e8ce2520258acb3d228beebbb626a8abc04f
https://github.com/dendisuhubdy/flow_synthesizer/tree/1561e8ce2520258acb3d228beebbb626a8abc04f
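The 'same' entry in Conv2dNew.pad_dict computes ((k - 1) * s + 1) // 2 per spatial dimension, so the default 3x3 kernel at stride 1 yields padding [1, 1] — exactly the padding=(1, 1) hard-coded into the compiled call() above. A standalone sanity check of just that formula:

def same_padding(kernel, stride):
    # Mirrors the 'same' lambda from Conv2dNew.pad_dict.
    return [((k - 1) * s + 1) // 2 for k, s in zip(kernel, stride)]


assert same_padding([3, 3], [1, 1]) == [1, 1]
assert same_padding([5, 5], [1, 1]) == [2, 2]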
cnn_layer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xr/cxrzuhg4v2k43m6sohikpdt2rwzcj5ayuvu3xeyydisg34liifbv.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 
tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 1), (4, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 1), (1, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf2, 4, grid=grid(4), stream=stream0) del primals_2 return (buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
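The fused kernel above folds three steps of the cnn_layer forward/backward into one pass over the four output elements: the Conv1d bias add, the ReLU, and the boolean mask that threshold_backward later uses to zero gradients where the ReLU clamped. In eager terms it is roughly the following sketch (names are illustrative):

import torch


def fused_bias_relu_with_mask(conv_out, bias):
    # conv_out: (C, L) convolution result computed without bias
    # (extern_kernels.convolution is invoked with bias=None above).
    pre = conv_out + bias.reshape(-1, 1)  # broadcast bias over length
    out = torch.relu(pre)
    grad_mask = out <= 0  # saved for aten.threshold_backward
    return out, grad_mask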
import torch import torch.nn as nn import torch.utils.data.dataloader import torch.nn class cnn_layer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True): super(cnn_layer, self).__init__() self.conv = torch.nn.Conv1d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding, bias=bias) self.relu = torch.nn.ReLU() def forward(self, input): return self.relu(self.conv(input)) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 1), (4, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(4)](buf1, primals_2, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), buf2 class cnn_layerNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True): super(cnn_layerNew, self).__init__() self.conv = torch.nn.Conv1d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding, bias=bias) self.relu = torch.nn.ReLU() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
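A usage sketch checking that the compiled module is a drop-in replacement for the eager cnn_layer on the shapes from get_inputs()/get_init_inputs(); this assumes a CUDA device, since call() pins all buffers to cuda:0:

import torch

torch.manual_seed(0)
ref = cnn_layer(in_channels=4, out_channels=4, kernel_size=4).cuda()
opt = cnn_layerNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
opt.load_state_dict(ref.state_dict())

x = torch.rand([4, 4], device='cuda')
torch.testing.assert_close(ref(x), opt(x))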
db-bionlp/CLNER
cnn_layer
false
15160
[ "MIT" ]
46
77910311acf0411252b9fea8c3e6efb7175eb21f
https://github.com/db-bionlp/CLNER/tree/77910311acf0411252b9fea8c3e6efb7175eb21f
bilinear_classifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6e/c6ek4vaz4ftzm6ataust6sidvnoeklculi3ag7tscvwlv5jvr5k5.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.cat, aten.view] # Source node to ATen node mapping: # x => cat # x_1 => view # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], 2), kwargs = {}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%cat, [-1, 5]), kwargs = {}) triton_poi_fused_cat_view_0 = async_compile.triton('triton_poi_fused_cat_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_view_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) 
tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = 1.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (5, 4, 5), (20, 5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.cat, aten.view] stream0 = get_raw_stream(0) triton_poi_fused_cat_view_0.run(primals_1, buf0, 80, grid=grid(80), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 20), (20, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (5, 20), (20, 1), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.cat] triton_poi_fused_cat_view_0.run(primals_2, buf2, 80, grid=grid(80), stream=stream0) del primals_2 buf3 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 5), (80, 5, 1), 0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), out=buf3) del buf1 return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 1, 4), 0), buf2, reinterpret_tensor(buf0, (5, 16), (1, 5), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((5, 4, 5), (20, 5, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
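Both launches of triton_poi_fused_cat_view_0 in the call() above implement the bias-column append from biaffine_mapping.forward: a (4, 4, 4) input gains a trailing column of ones (the x0 == 4 lane selects the constant 1.0 branch) and is laid out as a (16, 5) matrix for the mm. An eager equivalent of what the kernel materializes:

import torch


def append_bias_column(t):
    # t: (batch, bucket, features) -> (batch * bucket, features + 1)
    ones = torch.ones(*t.shape[:2], 1, device=t.device, dtype=t.dtype)
    return torch.cat([t, ones], dim=2).reshape(-1, t.shape[-1] + 1)


assert append_bias_column(torch.rand(4, 4, 4)).shape == (16, 5)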
import torch
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn


class Sparse_dropout(nn.Module):

    def __init__(self, p):
        super(Sparse_dropout, self).__init__()
        self.dropout_rate = p

    def forward(self, input, noise_shape):
        if not self.training:
            return input
        shapes = input.shape
        noise_shape = list(noise_shape)
        broadcast_dims = []
        for idx, dim_pair in enumerate(zip(shapes, noise_shape)):
            if dim_pair[1] > 1:
                broadcast_dims.append((idx, dim_pair[0]))
        mask_dims = []
        for dim in broadcast_dims:
            mask_dims.append(dim[1])
        # Sample one keep/drop decision per broadcast group and rescale so
        # the expected activation is unchanged (inverted dropout).
        mask = torch.bernoulli((torch.ones(mask_dims, device=input.device) *
            (1 - self.dropout_rate)).reshape(noise_shape)) * (1 / (1 -
            self.dropout_rate))
        return input * mask


class biaffine_mapping(nn.Module):

    def __init__(self, input_size_x, input_size_y, output_size, bias_x,
        bias_y, initializer=None):
        super(biaffine_mapping, self).__init__()
        self.bias_x = bias_x
        self.bias_y = bias_y
        self.output_size = output_size
        self.initializer = initializer
        input_size1 = input_size_x + 1 if bias_x else input_size_x
        input_size2 = input_size_y + 1 if bias_y else input_size_y
        self.biaffine_map = nn.Parameter(torch.Tensor(input_size1,
            output_size, input_size2))
        self.initialize()

    def initialize(self):
        if self.initializer is None:
            torch.nn.init.orthogonal_(self.biaffine_map)
        else:
            self.initializer(self.biaffine_map)

    def forward(self, x, y):
        batch_size, bucket_size = x.shape[0], x.shape[1]
        if self.bias_x:
            x = torch.cat([x, torch.ones([batch_size, bucket_size, 1],
                device=x.device)], axis=2)
        if self.bias_y:
            y = torch.cat([y, torch.ones([batch_size, bucket_size, 1],
                device=y.device)], axis=2)
        x_set_size, y_set_size = x.shape[-1], y.shape[-1]
        x = x.reshape(-1, x_set_size)
        biaffine_map = self.biaffine_map.reshape(x_set_size, -1)
        biaffine_mapping = torch.matmul(x, biaffine_map).reshape(batch_size,
            -1, y_set_size)
        biaffine_mapping = biaffine_mapping.bmm(torch.transpose(y, 1, 2)
            ).reshape(batch_size, bucket_size, self.output_size, bucket_size)
        biaffine_mapping = biaffine_mapping.transpose(2, 3)
        return biaffine_mapping


class bilinear_classifier(nn.Module):

    def __init__(self, dropout, input_size_x, input_size_y, output_size,
        bias_x=True, bias_y=True):
        super(bilinear_classifier, self).__init__()
        self.dropout_rate = dropout
        self.output_size = output_size
        self.dropout = Sparse_dropout(p=self.dropout_rate)
        self.biaffine = biaffine_mapping(input_size_x, input_size_y,
            output_size, bias_x, bias_y)

    def forward(self, x_bnv, y_bnv):
        batch_size, input_size_x = x_bnv.shape[0], x_bnv.shape[-1]
        input_size_y = y_bnv.shape[-1]
        noise_shape_x = [batch_size, 1, input_size_x]
        noise_shape_y = [batch_size, 1, input_size_y]
        x = self.dropout(x_bnv, noise_shape_x)
        y = self.dropout(y_bnv, noise_shape_y)
        output = self.biaffine(x, y)
        if self.output_size == 1:
            output = output.squeeze(-1)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dropout': 0.5, 'input_size_x': 4, 'input_size_y': 4,
        'output_size': 4}]
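The reshape/matmul/bmm chain in biaffine_mapping.forward is a standard biaffine scorer: with bias columns appended, score[b, i, j, o] is the sum over d, e of x̂[b, i, d] * U[d, o, e] * ŷ[b, j, e]. An einsum sketch of the same computation (illustrative; not the code path the compiled kernels take):

import torch


def biaffine_scores(x_hat, y_hat, U):
    # x_hat: (B, N, Dx + 1), y_hat: (B, N, Dy + 1), U: (Dx + 1, O, Dy + 1)
    # Returns (B, N, N, O), matching biaffine_mapping.forward after its
    # final transpose(2, 3).
    return torch.einsum('bnd,doe,bme->bnmo', x_hat, U, y_hat)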
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data.dataloader
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = xindex // 5
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 5, tl.int64)
    tmp9 = 1.0
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp6, tmp9, tmp10)
    tmp12 = tl.where(tmp4, tmp5, tmp11)
    tl.store(out_ptr0 + x2, tmp12, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (5, 4, 5), (20, 5, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_view_0[grid(80)](primals_1, buf0, 80, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((16, 20), (20, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (5, 20), (20,
            1), 0), out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
        triton_poi_fused_cat_view_0[grid(80)](primals_2, buf2, 80, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_2
        buf3 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 5), (80, 5, 1),
            0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), out=buf3)
        del buf1
    return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 1, 4), 0
        ), buf2, reinterpret_tensor(buf0, (5, 16), (1, 5), 0)


class Sparse_dropout(nn.Module):

    def __init__(self, p):
        super(Sparse_dropout, self).__init__()
        self.dropout_rate = p

    def forward(self, input, noise_shape):
        if not self.training:
            return input
        shapes = input.shape
        noise_shape = list(noise_shape)
        broadcast_dims = []
        for idx, dim_pair in enumerate(zip(shapes, noise_shape)):
            if dim_pair[1] > 1:
                broadcast_dims.append((idx, dim_pair[0]))
        mask_dims = []
        for dim in broadcast_dims:
            mask_dims.append(dim[1])
        # Sample one keep/drop decision per broadcast group and rescale so
        # the expected activation is unchanged (inverted dropout).
        mask = torch.bernoulli((torch.ones(mask_dims, device=input.device) *
            (1 - self.dropout_rate)).reshape(noise_shape)) * (1 / (1 -
            self.dropout_rate))
        return input * mask


class biaffine_mapping(nn.Module):

    def __init__(self, input_size_x, input_size_y, output_size, bias_x,
        bias_y, initializer=None):
        super(biaffine_mapping, self).__init__()
        self.bias_x = bias_x
        self.bias_y = bias_y
        self.output_size = output_size
        self.initializer = initializer
        input_size1 = input_size_x + 1 if bias_x else input_size_x
        input_size2 = input_size_y + 1 if bias_y else input_size_y
        self.biaffine_map = nn.Parameter(torch.Tensor(input_size1,
            output_size, input_size2))
        self.initialize()

    def initialize(self):
        if self.initializer is None:
            torch.nn.init.orthogonal_(self.biaffine_map)
        else:
            self.initializer(self.biaffine_map)

    def forward(self, x, y):
        batch_size, bucket_size = x.shape[0], x.shape[1]
        if self.bias_x:
            x = torch.cat([x, torch.ones([batch_size, bucket_size, 1],
                device=x.device)], axis=2)
        if self.bias_y:
            y = torch.cat([y, torch.ones([batch_size, bucket_size, 1],
                device=y.device)], axis=2)
        x_set_size, y_set_size = x.shape[-1], y.shape[-1]
        x = x.reshape(-1, x_set_size)
        biaffine_map = self.biaffine_map.reshape(x_set_size, -1)
        biaffine_mapping = torch.matmul(x, biaffine_map).reshape(batch_size,
            -1, y_set_size)
        biaffine_mapping = biaffine_mapping.bmm(torch.transpose(y, 1, 2)
            ).reshape(batch_size, bucket_size, self.output_size, bucket_size)
        biaffine_mapping = biaffine_mapping.transpose(2, 3)
        return biaffine_mapping


class bilinear_classifierNew(nn.Module):

    def __init__(self, dropout, input_size_x, input_size_y, output_size,
        bias_x=True, bias_y=True):
        super(bilinear_classifierNew, self).__init__()
        self.dropout_rate = dropout
        self.output_size = output_size
        self.dropout = Sparse_dropout(p=self.dropout_rate)
        self.biaffine = biaffine_mapping(input_size_x, input_size_y,
            output_size, bias_x, bias_y)

    def forward(self, input_0, input_1):
        primals_3 = self.biaffine.biaffine_map
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3])
        return output[0]
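As with the other records, the compiled bilinear_classifierNew is intended to match the eager module in eval mode, where Sparse_dropout is the identity and the only learned state is the biaffine map. A hedged parity check, assuming a CUDA device:

import torch

torch.manual_seed(0)
ref = bilinear_classifier(dropout=0.5, input_size_x=4, input_size_y=4,
    output_size=4).cuda().eval()
opt = bilinear_classifierNew(dropout=0.5, input_size_x=4, input_size_y=4,
    output_size=4).cuda().eval()
opt.biaffine.biaffine_map.data.copy_(ref.biaffine.biaffine_map.data)

x = torch.rand(4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, device='cuda')
torch.testing.assert_close(ref(x, y), opt(x, y))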
db-bionlp/CLNER
bilinear_classifier
false
15161
[ "MIT" ]
46
77910311acf0411252b9fea8c3e6efb7175eb21f
https://github.com/db-bionlp/CLNER/tree/77910311acf0411252b9fea8c3e6efb7175eb21f
LSID
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/vy/cvyepqnfv2zvm5e4dhrkyumfr7u6dg4xv2za3tbeoexiapsex34f.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x => convolution # x_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/i5/ci5bmbeqc2upwqbmt7aboh3u2ftpl7b7hapx57gnkimqppb75bsl.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_4 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, 
tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/lv/clvb6e5wwua3rgienmiy74f55jldgelldptpx3d52s5w5heimwjn.py # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_5 => convolution_2 # x_6 => gt_2, mul_2, where_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {}) triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ci/ccivqnsqyre4mgo3xriiaazl7k3nibazipio4bpmlfha6ubxxhlr.py # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_9 => getitem_2, getitem_3 # Graph fragment: # 
%getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr1 + (x0), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ts/ctsf6ostfmtes56q36yedyyr7nnynihzeg4smj2abyycydgbqn5y.py # Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_10 => convolution_4 # x_11 => gt_4, mul_4, where_4 # Graph fragment: # %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {}) # %mul_4 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {}) triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ve/cveknj6a2gbamc2orbuvgffp3qowwq4eqcm6caa54u5szaselw7v.py # Topologically Sorted Source Nodes: [x_12, x_13, x_14], Original ATen: [aten.convolution, aten.leaky_relu, aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_12 => convolution_5 # x_13 => gt_5, mul_5, where_5 # x_14 => getitem_4, getitem_5 # Graph fragment: # %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_5, 0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_5, 0.2), kwargs = {}) # %where_5 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_5, %mul_5), kwargs = {}) # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) 
triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i8', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_5(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tl.full([1], 0, tl.int64) tmp9 = tmp8 >= tmp8 tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp8 < tmp10 tmp12 = tmp9 & tmp11 tmp13 = tmp12 & tmp12 tmp14 = tmp10 >= tmp8 tmp15 = tmp10 < tmp10 tmp16 = tmp14 & tmp15 tmp17 = tmp12 & tmp16 tmp18 = triton_helpers.maximum(tmp7, tmp7) tmp19 = tmp16 & tmp12 tmp20 = triton_helpers.maximum(tmp7, tmp18) tmp21 = tmp16 & tmp16 tmp22 = triton_helpers.maximum(tmp7, tmp20) tmp23 = tmp7 > tmp7 tmp24 = tl.full([1], 1, tl.int8) tmp25 = tl.full([1], 0, tl.int8) tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tmp7 > tmp18 tmp28 = tl.full([1], 2, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp7 > tmp20 tmp31 = tl.full([1], 3, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tl.store(in_out_ptr0 + (x2), tmp7, xmask) tl.store(out_ptr0 + (x2), tmp22, xmask) tl.store(out_ptr1 + (x2), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kr/ckrovzxohsppjdjbwlm7vydnedcmhlptuiex3ipurrmeka35tzjh.py # Topologically Sorted Source Nodes: [x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_15 => convolution_6 # x_16 => gt_6, mul_6, where_6 # Graph fragment: # %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_6 : [num_users=1] = 
call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.2), kwargs = {}) # %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_6), kwargs = {}) triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ft/cftuqath3q7g4y3fsbmfmgxs6q3wfdttzlxmk2lo3fxr3hv44jhf.py # Topologically Sorted Source Nodes: [x_17, x_18, x_19], Original ATen: [aten.convolution, aten.leaky_relu, aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_17 => convolution_7 # x_18 => gt_7, mul_7, where_7 # x_19 => getitem_6, getitem_7 # Graph fragment: # %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_6, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_7, 0), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_7, 0.2), kwargs = {}) # %where_7 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %convolution_7, %mul_7), kwargs = {}) # %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args 
= (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i8', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_7(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tl.full([1], 0, tl.int64) tmp9 = tmp8 >= tmp8 tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp8 < tmp10 tmp12 = tmp9 & tmp11 tmp13 = tmp12 & tmp12 tmp14 = tmp10 >= tmp8 tmp15 = tmp10 < tmp10 tmp16 = tmp14 & tmp15 tmp17 = tmp12 & tmp16 tmp18 = triton_helpers.maximum(tmp7, tmp7) tmp19 = tmp16 & tmp12 tmp20 = triton_helpers.maximum(tmp7, tmp18) tmp21 = tmp16 & tmp16 tmp22 = triton_helpers.maximum(tmp7, tmp20) tmp23 = tmp7 > tmp7 tmp24 = tl.full([1], 1, tl.int8) tmp25 = tl.full([1], 0, tl.int8) tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tmp7 > tmp18 tmp28 = tl.full([1], 2, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp7 > tmp20 tmp31 = tl.full([1], 3, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tl.store(in_out_ptr0 + (x2), tmp7, xmask) tl.store(out_ptr0 + (x2), tmp22, xmask) tl.store(out_ptr1 + (x2), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bu/cbud4un5il2p2q6a2golnodgakkneqctkb7n5xqp76h67aumolea.py # Topologically Sorted Source Nodes: [x_20, x_21], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_20 => convolution_8 # x_21 => gt_8, mul_8, where_8 # Graph fragment: # %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], 
False, [0, 0], 1), kwargs = {}) # %gt_8 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_8, 0), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 0.2), kwargs = {}) # %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %convolution_8, %mul_8), kwargs = {}) triton_poi_fused_convolution_leaky_relu_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x2), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7i/c7inuq5qyzjbmsyrrqqu43onrxjqp3bleeijjw7auwqfjimkjdil.py # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_25 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_4, %where_7], 1), kwargs = {}) triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 512 x1 = (xindex // 512) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x0) + (1024*x1)), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 512, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((256*x1) + ((-256) + x0)), tmp6, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/h3/ch3vt6asi7hhksvzhldpy5hifrglv7tmtqod44fw5h5lbyhk4eac.py # Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_31 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_8, %where_5], 1), kwargs = {}) triton_poi_fused_cat_10 = async_compile.triton('triton_poi_fused_cat_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex 
= xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = (xindex // 256) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x0) + (512*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 256, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((128*x1) + ((-128) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/lo/clomp6got7qtzig7a2f3x5okxgooa3ee7ols53qk5dl66gnht7uu.py # Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_37 => cat_2 # Graph fragment: # %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_16, %where_3], 1), kwargs = {}) triton_poi_fused_cat_11 = async_compile.triton('triton_poi_fused_cat_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4) % 128 x0 = xindex % 4 x2 = (xindex // 512) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (256*x2)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 128, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (4*((-64) + x1)) + (256*x2)), tmp6, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/gt/cgt6r4vqu7wkgjn5cpyenm75u6mnu53ovkgjmndgjhjkdpfjgsgk.py # Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_43 => cat_3 # Graph fragment: # %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_19, 
%where_1], 1), kwargs = {}) triton_poi_fused_cat_12 = async_compile.triton('triton_poi_fused_cat_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_12(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 16) % 64 x0 = xindex % 16 x2 = (xindex // 1024) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (512*x2)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 64, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-32) + x1)) + (512*x2)), tmp6, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xq/cxqnmyfj5ehtxzvxxyo57fwakt4vb2h5ohhhnfsmamjrifclohk6.py # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_13 = async_compile.triton('triton_poi_fused_clone_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 8], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_13(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 128 xnumel = 6 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x4 = xindex y0 = yindex % 4 y1 = (yindex // 4) % 2 y2 = (yindex // 8) % 4 y3 = (yindex // 32) y6 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*y2) + (16*x4) + (96*y1) + (192*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4 + (6*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x4 + (6*y6)), tmp2, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43 = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128, ), (1, )) assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256, ), (1, )) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512, ), (1, )) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512, ), (1, )) assert_size_stride(primals_22, (512, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_23, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_24, (256, ), (1, )) assert_size_stride(primals_25, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_26, (256, ), (1, )) assert_size_stride(primals_27, (256, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_28, 
(128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_29, (128, ), (1, )) assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_31, (128, ), (1, )) assert_size_stride(primals_32, (128, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_33, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_34, (64, ), (1, )) assert_size_stride(primals_35, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_36, (64, ), (1, )) assert_size_stride(primals_37, (64, 32, 2, 2), (128, 4, 2, 1)) assert_size_stride(primals_38, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_39, (32, ), (1, )) assert_size_stride(primals_40, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_41, (32, ), (1, )) assert_size_stride(primals_42, (12, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_43, (12, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_2, 2048, grid=grid(2048), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_0.run(buf3, primals_5, 2048, grid=grid(2048), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 32, 2, 2), (128, 4, 2, 1), torch.float32) buf5 = empty_strided_cuda((4, 32, 2, 2), (128, 4, 2, 1), torch.int8) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 512, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 2, 2), (256, 4, 2, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf7, primals_7, 1024, grid=grid(1024), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 2, 2), (256, 4, 2, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf9, primals_9, 1024, grid=grid(1024), stream=stream0) del primals_9 buf10 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.float32) buf11 = 
empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.int8) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 128, 1, 1), (128, 1, 1, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_4.run(buf13, primals_11, 512, grid=grid(512), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 128, 1, 1), (128, 1, 1, 1)) buf15 = buf14; del buf14 # reuse buf16 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch.float32) buf17 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch.int8) # Topologically Sorted Source Nodes: [x_12, x_13, x_14], Original ATen: [aten.convolution, aten.leaky_relu, aten.max_pool2d_with_indices] triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_5.run(buf15, primals_13, buf16, buf17, 512, grid=grid(512), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 256, 1, 1), (256, 1, 1, 1)) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_6.run(buf19, primals_15, 1024, grid=grid(1024), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 256, 1, 1), (256, 1, 1, 1)) buf21 = buf20; del buf20 # reuse buf22 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1, 1), torch.float32) buf23 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1, 1), torch.int8) # Topologically Sorted Source Nodes: [x_17, x_18, x_19], Original ATen: [aten.convolution, aten.leaky_relu, aten.max_pool2d_with_indices] triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_7.run(buf21, primals_17, buf22, buf23, 1024, grid=grid(1024), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 1, 1), (512, 1, 1, 1)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [x_20, x_21], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_8.run(buf25, primals_19, 2048, grid=grid(2048), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution] buf26 = 
extern_kernels.convolution(buf25, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 512, 1, 1), (512, 1, 1, 1)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [x_22, x_23], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_8.run(buf27, primals_21, 2048, grid=grid(2048), stream=stream0) del primals_21 # Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, primals_22, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 256, 2, 2), (1024, 4, 2, 1)) buf29 = empty_strided_cuda((4, 512, 1, 1), (512, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.cat] triton_poi_fused_cat_9.run(buf28, buf21, buf29, 2048, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 1, 1), (256, 1, 1, 1)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_6.run(buf31, primals_24, 1024, grid=grid(1024), stream=stream0) del primals_24 # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf31, primals_25, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 1, 1), (256, 1, 1, 1)) buf33 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [x_28, x_29], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_6.run(buf33, primals_26, 1024, grid=grid(1024), stream=stream0) del primals_26 # Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.convolution] buf34 = extern_kernels.convolution(buf33, primals_27, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 128, 2, 2), (512, 4, 2, 1)) buf35 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.cat] triton_poi_fused_cat_10.run(buf34, buf15, buf35, 1024, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [x_32], Original ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf35, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 128, 1, 1), (128, 1, 1, 1)) buf37 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [x_32, x_33], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_4.run(buf37, primals_29, 512, grid=grid(512), stream=stream0) del primals_29 # Topologically Sorted Source Nodes: [x_34], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 128, 1, 1), (128, 1, 1, 1)) buf39 = buf38; del buf38 # reuse # Topologically Sorted 
Source Nodes: [x_34, x_35], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_4.run(buf39, primals_31, 512, grid=grid(512), stream=stream0) del primals_31 # Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.convolution] buf40 = extern_kernels.convolution(buf39, primals_32, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 64, 2, 2), (256, 4, 2, 1)) buf41 = buf34; del buf34 # reuse # Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.cat] triton_poi_fused_cat_11.run(buf40, buf9, buf41, 2048, grid=grid(2048), stream=stream0) del buf40 # Topologically Sorted Source Nodes: [x_38], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf41, primals_33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 64, 2, 2), (256, 4, 2, 1)) buf43 = buf42; del buf42 # reuse # Topologically Sorted Source Nodes: [x_38, x_39], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf43, primals_34, 1024, grid=grid(1024), stream=stream0) del primals_34 # Topologically Sorted Source Nodes: [x_40], Original ATen: [aten.convolution] buf44 = extern_kernels.convolution(buf43, primals_35, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 64, 2, 2), (256, 4, 2, 1)) buf45 = buf44; del buf44 # reuse # Topologically Sorted Source Nodes: [x_40, x_41], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf45, primals_36, 1024, grid=grid(1024), stream=stream0) del primals_36 # Topologically Sorted Source Nodes: [x_42], Original ATen: [aten.convolution] buf46 = extern_kernels.convolution(buf45, primals_37, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 32, 4, 4), (512, 16, 4, 1)) buf47 = reinterpret_tensor(buf28, (4, 64, 4, 4), (1024, 16, 4, 1), 0); del buf28 # reuse # Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.cat] triton_poi_fused_cat_12.run(buf46, buf3, buf47, 4096, grid=grid(4096), stream=stream0) del buf46 # Topologically Sorted Source Nodes: [x_44], Original ATen: [aten.convolution] buf48 = extern_kernels.convolution(buf47, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 32, 4, 4), (512, 16, 4, 1)) buf49 = buf48; del buf48 # reuse # Topologically Sorted Source Nodes: [x_44, x_45], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_0.run(buf49, primals_39, 2048, grid=grid(2048), stream=stream0) del primals_39 # Topologically Sorted Source Nodes: [x_46], Original ATen: [aten.convolution] buf50 = extern_kernels.convolution(buf49, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 32, 4, 4), (512, 16, 4, 1)) buf51 = buf50; del buf50 # reuse # Topologically Sorted Source Nodes: [x_46, x_47], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_0.run(buf51, primals_41, 2048, grid=grid(2048), stream=stream0) del primals_41 # Topologically Sorted Source Nodes: [x_48], Original ATen: [aten.convolution] 
buf52 = extern_kernels.convolution(buf51, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 12, 4, 4), (192, 16, 4, 1)) buf53 = empty_strided_cuda((4, 4, 2, 4, 2, 3), (192, 48, 24, 6, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_13.run(buf52, primals_43, buf53, 128, 6, grid=grid(128, 6), stream=stream0) del buf52 del primals_43 return (reinterpret_tensor(buf53, (4, 3, 8, 8), (192, 1, 24, 3), 0), primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_23, primals_25, primals_27, primals_28, primals_30, primals_32, primals_33, primals_35, primals_37, primals_38, primals_40, primals_42, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((512, 256, 2, 2), (1024, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_27 = 
rand_strided((256, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((128, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((64, 32, 2, 2), (128, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((12, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
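# --- Illustrative sketch, not part of the generated module: each fused `cat`
# kernel above implements a channel-wise torch.cat of an upsampled decoder
# tensor with a saved encoder activation. In triton_poi_fused_cat_9 the load
# offset `4 * x0 + 1024 * x1` walks in_ptr0 (strides (1024, 4, 2, 1)), so it
# reads only the top-left element of each 2x2 map, fusing the
# `x[:, :, :1, :1]` slice into the concat. `_reference_cat9_check` is a
# hypothetical helper; its shapes mirror the assert_size_stride checks in
# call() (buf28 and buf21).
def _reference_cat9_check():
    import torch
    up = torch.randn(4, 256, 2, 2)    # like buf28, output of the up6 deconv
    skip = torch.randn(4, 256, 1, 1)  # like buf21, the matching encoder skip
    out = torch.cat((up[:, :, :1, :1], skip), 1)  # what triton_poi_fused_cat_9 emits
    assert out.shape == (4, 512, 1, 1)
    assert torch.equal(out[:, :256, 0, 0], up[:, :, 0, 0])
    assert torch.equal(out[:, 256:], skip)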
import math import torch import torch.nn as nn def pixel_shuffle(input, upscale_factor, depth_first=False): """Rearranges elements in a tensor of shape :math:`[*, C*r^2, H, W]` to a tensor of shape :math:`[*, C, H*r, W*r]`. See :class:`~torch.nn.PixelShuffle` for details. Args: input (Tensor): Input upscale_factor (int): factor to increase spatial resolution by Examples:: >>> ps = nn.PixelShuffle(3) >>> input = torch.empty(1, 9, 4, 4) >>> output = ps(input) >>> print(output.size()) torch.Size([1, 1, 12, 12]) """ batch_size, channels, in_height, in_width = input.size() channels //= upscale_factor ** 2 out_height = in_height * upscale_factor out_width = in_width * upscale_factor if not depth_first: input_view = input.contiguous().view(batch_size, channels, upscale_factor, upscale_factor, in_height, in_width) shuffle_out = input_view.permute(0, 1, 4, 2, 5, 3).contiguous() return shuffle_out.view(batch_size, channels, out_height, out_width) else: input_view = input.contiguous().view(batch_size, upscale_factor, upscale_factor, channels, in_height, in_width) shuffle_out = input_view.permute(0, 4, 1, 5, 2, 3).contiguous().view( batch_size, out_height, out_width, channels) return shuffle_out.permute(0, 3, 1, 2) class LSID(nn.Module): def __init__(self, inchannel=4, block_size=2): super(LSID, self).__init__() self.block_size = block_size self.conv1_1 = nn.Conv2d(inchannel, 32, kernel_size=3, stride=1, padding=1, bias=True) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) self.conv1_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=True) self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True) self.conv2_1 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=True) self.conv2_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True) self.conv3_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding= 1, bias=True) self.conv3_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding =1, bias=True) self.conv4_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding =1, bias=True) self.conv4_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding =1, bias=True) self.conv5_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding =1, bias=True) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding =1, bias=True) self.up6 = nn.ConvTranspose2d(512, 256, 2, stride=2, bias=False) self.conv6_1 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding =1, bias=True) self.conv6_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding =1, bias=True) self.up7 = nn.ConvTranspose2d(256, 128, 2, stride=2, bias=False) self.conv7_1 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding =1, bias=True) self.conv7_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding =1, bias=True) self.up8 = nn.ConvTranspose2d(128, 64, 2, stride=2, bias=False) self.conv8_1 = nn.Conv2d(128, 64, kernel_size=3, stride=1, padding= 1, bias=True) self.conv8_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True) self.up9 = nn.ConvTranspose2d(64, 32, 2, stride=2, bias=False) self.conv9_1 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1, bias=True) self.conv9_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=True) out_channel = 3 * self.block_size * self.block_size self.conv10 = nn.Conv2d(32, out_channel, kernel_size=1, stride=1, padding=0, bias=True) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) m.bias.data.zero_() elif
isinstance(m, nn.ConvTranspose2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) def forward(self, x): x = self.conv1_1(x) x = self.lrelu(x) x = self.conv1_2(x) x = self.lrelu(x) conv1 = x x = self.maxpool(x) x = self.conv2_1(x) x = self.lrelu(x) x = self.conv2_2(x) x = self.lrelu(x) conv2 = x x = self.maxpool(x) x = self.conv3_1(x) x = self.lrelu(x) x = self.conv3_2(x) x = self.lrelu(x) conv3 = x x = self.maxpool(x) x = self.conv4_1(x) x = self.lrelu(x) x = self.conv4_2(x) x = self.lrelu(x) conv4 = x x = self.maxpool(x) x = self.conv5_1(x) x = self.lrelu(x) x = self.conv5_2(x) x = self.lrelu(x) x = self.up6(x) x = torch.cat((x[:, :, :conv4.size(2), :conv4.size(3)], conv4), 1) x = self.conv6_1(x) x = self.lrelu(x) x = self.conv6_2(x) x = self.lrelu(x) x = self.up7(x) x = torch.cat((x[:, :, :conv3.size(2), :conv3.size(3)], conv3), 1) x = self.conv7_1(x) x = self.lrelu(x) x = self.conv7_2(x) x = self.lrelu(x) x = self.up8(x) x = torch.cat((x[:, :, :conv2.size(2), :conv2.size(3)], conv2), 1) x = self.conv8_1(x) x = self.lrelu(x) x = self.conv8_2(x) x = self.lrelu(x) x = self.up9(x) x = torch.cat((x[:, :, :conv1.size(2), :conv1.size(3)], conv1), 1) x = self.conv9_1(x) x = self.lrelu(x) x = self.conv9_2(x) x = self.lrelu(x) x = self.conv10(x) depth_to_space_conv = pixel_shuffle(x, upscale_factor=self. block_size, depth_first=True) return depth_to_space_conv def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
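# --- Usage sketch (an assumption, not part of the original file): running the
# eager reference model on CPU. With the default block_size=2, conv10 emits
# 12 channels, and pixel_shuffle(depth_first=True) rearranges the resulting
# (4, 12, 4, 4) map into (4, 3, 8, 8) -- the same shape the compiled call()
# below returns. `_eager_smoke_test` is a hypothetical helper name.
def _eager_smoke_test():
    model = LSID(inchannel=4, block_size=2)
    x = get_inputs()[0]              # (4, 4, 4, 4), matching primals_3
    y = model(x)
    assert y.shape == (4, 3, 8, 8)   # 12 = 3 * block_size**2 channels, unshuffled
    return y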
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) 
tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_5( in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tl.full([1], 0, tl.int64) tmp9 = tmp8 >= tmp8 tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp8 < tmp10 tmp12 = tmp9 & tmp11 tmp12 & tmp12 tmp14 = tmp10 >= tmp8 tmp15 = tmp10 < tmp10 tmp16 = tmp14 & tmp15 tmp12 & tmp16 tmp18 = triton_helpers.maximum(tmp7, tmp7) tmp16 & tmp12 tmp20 = triton_helpers.maximum(tmp7, tmp18) tmp16 & tmp16 tmp22 = triton_helpers.maximum(tmp7, tmp20) tmp23 = tmp7 > tmp7 tmp24 = tl.full([1], 1, tl.int8) tmp25 = tl.full([1], 0, tl.int8) tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tmp7 > tmp18 tmp28 = tl.full([1], 2, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp7 > tmp20 tmp31 = tl.full([1], 3, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr0 + x2, tmp22, xmask) tl.store(out_ptr1 + x2, tmp32, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_7( in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tl.full([1], 0, tl.int64) tmp9 = tmp8 >= tmp8 tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp8 < tmp10 tmp12 = tmp9 & tmp11 tmp12 & tmp12 tmp14 = tmp10 >= tmp8 tmp15 = tmp10 < tmp10 tmp16 = tmp14 & tmp15 tmp12 & tmp16 tmp18 = triton_helpers.maximum(tmp7, tmp7) tmp16 & tmp12 tmp20 = triton_helpers.maximum(tmp7, tmp18) tmp16 & tmp16 tmp22 = triton_helpers.maximum(tmp7, tmp20) tmp23 = tmp7 > tmp7 tmp24 = tl.full([1], 1, tl.int8) tmp25 = tl.full([1], 0, tl.int8) tmp26 = tl.where(tmp23, tmp24, tmp25) 
tmp27 = tmp7 > tmp18 tmp28 = tl.full([1], 2, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp7 > tmp20 tmp31 = tl.full([1], 3, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr0 + x2, tmp22, xmask) tl.store(out_ptr1 + x2, tmp32, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, None) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 512 x1 = xindex // 512 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x0 + 1024 * x1), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 512, tl.int64) tmp9 = tl.load(in_ptr1 + (256 * x1 + (-256 + x0)), tmp6, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, None) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = xindex // 256 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x0 + 512 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp9 = tl.load(in_ptr1 + (128 * x1 + (-128 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_cat_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4 % 128 x0 = xindex % 4 x2 = xindex // 512 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 256 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-64 + x1) + 256 * x2), tmp6, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_cat_12(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 64 x0 = xindex % 16 x2 = xindex // 1024 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 512 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-32 + x1) + 512 * x2), tmp6, other=0.0 ) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_clone_13(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 6 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x4 = xindex y0 = yindex % 4 y1 = yindex // 4 % 2 y2 = yindex // 8 % 4 y3 = yindex // 32 y6 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * y2 + 16 * x4 + 96 * y1 + 192 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4 + 6 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x4 + 6 * y6), tmp2, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43 ) = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_23, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_24, (256,), (1,)) assert_size_stride(primals_25, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_26, (256,), (1,)) assert_size_stride(primals_27, (256, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_28, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_29, (128,), (1,)) assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_31, 
(128,), (1,)) assert_size_stride(primals_32, (128, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_33, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_34, (64,), (1,)) assert_size_stride(primals_35, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_36, (64,), (1,)) assert_size_stride(primals_37, (64, 32, 2, 2), (128, 4, 2, 1)) assert_size_stride(primals_38, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_39, (32,), (1,)) assert_size_stride(primals_40, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_41, (32,), (1,)) assert_size_stride(primals_42, (12, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_43, (12,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(2048)](buf1, primals_2, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_0[grid(2048)](buf3, primals_5, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 32, 2, 2), (128, 4, 2, 1), torch.float32) buf5 = empty_strided_cuda((4, 32, 2, 2), (128, 4, 2, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(512)](buf3, buf4, buf5, 512, XBLOCK=128, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 2, 2), (256, 4, 2, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_leaky_relu_2[grid(1024)](buf7, primals_7, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 2, 2), (256, 4, 2, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_leaky_relu_2[grid(1024)](buf9, primals_9, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf10 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.float32) buf11 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(256)](buf9, buf10, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 128, 1, 1), (128, 1, 1, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_leaky_relu_4[grid(512)](buf13, primals_11, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 128, 1, 1), (128, 1, 1, 1)) buf15 = buf14 del buf14 buf16 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch. 
float32) buf17 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch.int8) triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_5[grid (512)](buf15, primals_13, buf16, buf17, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 256, 1, 1), (256, 1, 1, 1)) buf19 = buf18 del buf18 triton_poi_fused_convolution_leaky_relu_6[grid(1024)](buf19, primals_15, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 256, 1, 1), (256, 1, 1, 1)) buf21 = buf20 del buf20 buf22 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1, 1), torch. float32) buf23 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1, 1), torch.int8) triton_poi_fused_convolution_leaky_relu_max_pool2d_with_indices_7[grid (1024)](buf21, primals_17, buf22, buf23, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_17 buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 1, 1), (512, 1, 1, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_leaky_relu_8[grid(2048)](buf25, primals_19, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 512, 1, 1), (512, 1, 1, 1)) buf27 = buf26 del buf26 triton_poi_fused_convolution_leaky_relu_8[grid(2048)](buf27, primals_21, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_21 buf28 = extern_kernels.convolution(buf27, primals_22, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 256, 2, 2), (1024, 4, 2, 1)) buf29 = empty_strided_cuda((4, 512, 1, 1), (512, 1, 1, 1), torch. float32) triton_poi_fused_cat_9[grid(2048)](buf28, buf21, buf29, 2048, XBLOCK=128, num_warps=4, num_stages=1) buf30 = extern_kernels.convolution(buf29, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 1, 1), (256, 1, 1, 1)) buf31 = buf30 del buf30 triton_poi_fused_convolution_leaky_relu_6[grid(1024)](buf31, primals_24, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_24 buf32 = extern_kernels.convolution(buf31, primals_25, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 1, 1), (256, 1, 1, 1)) buf33 = buf32 del buf32 triton_poi_fused_convolution_leaky_relu_6[grid(1024)](buf33, primals_26, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_26 buf34 = extern_kernels.convolution(buf33, primals_27, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 128, 2, 2), (512, 4, 2, 1)) buf35 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1, 1), torch. 
float32) triton_poi_fused_cat_10[grid(1024)](buf34, buf15, buf35, 1024, XBLOCK=128, num_warps=4, num_stages=1) buf36 = extern_kernels.convolution(buf35, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 128, 1, 1), (128, 1, 1, 1)) buf37 = buf36 del buf36 triton_poi_fused_convolution_leaky_relu_4[grid(512)](buf37, primals_29, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_29 buf38 = extern_kernels.convolution(buf37, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 128, 1, 1), (128, 1, 1, 1)) buf39 = buf38 del buf38 triton_poi_fused_convolution_leaky_relu_4[grid(512)](buf39, primals_31, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_31 buf40 = extern_kernels.convolution(buf39, primals_32, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 64, 2, 2), (256, 4, 2, 1)) buf41 = buf34 del buf34 triton_poi_fused_cat_11[grid(2048)](buf40, buf9, buf41, 2048, XBLOCK=256, num_warps=4, num_stages=1) del buf40 buf42 = extern_kernels.convolution(buf41, primals_33, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 64, 2, 2), (256, 4, 2, 1)) buf43 = buf42 del buf42 triton_poi_fused_convolution_leaky_relu_2[grid(1024)](buf43, primals_34, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_34 buf44 = extern_kernels.convolution(buf43, primals_35, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 64, 2, 2), (256, 4, 2, 1)) buf45 = buf44 del buf44 triton_poi_fused_convolution_leaky_relu_2[grid(1024)](buf45, primals_36, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_36 buf46 = extern_kernels.convolution(buf45, primals_37, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 32, 4, 4), (512, 16, 4, 1)) buf47 = reinterpret_tensor(buf28, (4, 64, 4, 4), (1024, 16, 4, 1), 0) del buf28 triton_poi_fused_cat_12[grid(4096)](buf46, buf3, buf47, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf46 buf48 = extern_kernels.convolution(buf47, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 32, 4, 4), (512, 16, 4, 1)) buf49 = buf48 del buf48 triton_poi_fused_convolution_leaky_relu_0[grid(2048)](buf49, primals_39, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_39 buf50 = extern_kernels.convolution(buf49, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 32, 4, 4), (512, 16, 4, 1)) buf51 = buf50 del buf50 triton_poi_fused_convolution_leaky_relu_0[grid(2048)](buf51, primals_41, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_41 buf52 = extern_kernels.convolution(buf51, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 12, 4, 4), (192, 16, 4, 1)) buf53 = empty_strided_cuda((4, 4, 2, 4, 2, 3), (192, 48, 24, 6, 3, 1), torch.float32) triton_poi_fused_clone_13[grid(128, 6)](buf52, primals_43, buf53, 128, 6, XBLOCK=2, YBLOCK=128, 
num_warps=4, num_stages=1) del buf52 del primals_43 return (reinterpret_tensor(buf53, (4, 3, 8, 8), (192, 1, 24, 3), 0), primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_23, primals_25, primals_27, primals_28, primals_30, primals_32, primals_33, primals_35, primals_37, primals_38, primals_40, primals_42, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51) def pixel_shuffle(input, upscale_factor, depth_first=False): """Rearranges elements in a tensor of shape :math:`[*, C*r^2, H, W]` to a tensor of shape :math:`[C, H*r, W*r]`. See :class:`~torch.nn.PixelShuffle` for details. Args: input (Tensor): Input upscale_factor (int): factor to increase spatial resolution by Examples:: >>> ps = nn.PixelShuffle(3) >>> input = torch.empty(1, 9, 4, 4) >>> output = ps(input) >>> print(output.size()) torch.Size([1, 1, 12, 12]) """ batch_size, channels, in_height, in_width = input.size() channels //= upscale_factor ** 2 out_height = in_height * upscale_factor out_width = in_width * upscale_factor if not depth_first: input_view = input.contiguous().view(batch_size, channels, upscale_factor, upscale_factor, in_height, in_width) shuffle_out = input_view.permute(0, 1, 4, 2, 5, 3).contiguous() return shuffle_out.view(batch_size, channels, out_height, out_width) else: input_view = input.contiguous().view(batch_size, upscale_factor, upscale_factor, channels, in_height, in_width) shuffle_out = input_view.permute(0, 4, 1, 5, 2, 3).contiguous().view( batch_size, out_height, out_width, channels) return shuffle_out.permute(0, 3, 1, 2) class LSIDNew(nn.Module): def __init__(self, inchannel=4, block_size=2): super(LSIDNew, self).__init__() self.block_size = block_size self.conv1_1 = nn.Conv2d(inchannel, 32, kernel_size=3, stride=1, padding=1, bias=True) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) self.conv1_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=True) self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True) self.conv2_1 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=True) self.conv2_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True) self.conv3_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding= 1, bias=True) self.conv3_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding =1, bias=True) self.conv4_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding =1, bias=True) self.conv4_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding =1, bias=True) self.conv5_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding =1, bias=True) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding =1, bias=True) self.up6 = nn.ConvTranspose2d(512, 256, 2, stride=2, bias=False) self.conv6_1 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding =1, bias=True) self.conv6_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding =1, bias=True) self.up7 = nn.ConvTranspose2d(256, 128, 2, stride=2, bias=False) self.conv7_1 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding =1, bias=True) self.conv7_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding =1, bias=True) self.up8 = nn.ConvTranspose2d(128, 64, 2, stride=2, bias=False) self.conv8_1 = nn.Conv2d(128, 64, kernel_size=3, stride=1, padding= 1, bias=True) self.conv8_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, 
padding=1, bias=True) self.up9 = nn.ConvTranspose2d(64, 32, 2, stride=2, bias=False) self.conv9_1 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1, bias=True) self.conv9_2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=True) out_channel = 3 * self.block_size * self.block_size self.conv10 = nn.Conv2d(32, out_channel, kernel_size=1, stride=1, padding=0, bias=True) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) m.bias.data.zero_() elif isinstance(m, nn.ConvTranspose2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.conv1_2.weight primals_5 = self.conv1_2.bias primals_6 = self.conv2_1.weight primals_7 = self.conv2_1.bias primals_8 = self.conv2_2.weight primals_9 = self.conv2_2.bias primals_10 = self.conv3_1.weight primals_11 = self.conv3_1.bias primals_12 = self.conv3_2.weight primals_13 = self.conv3_2.bias primals_14 = self.conv4_1.weight primals_15 = self.conv4_1.bias primals_16 = self.conv4_2.weight primals_17 = self.conv4_2.bias primals_18 = self.conv5_1.weight primals_19 = self.conv5_1.bias primals_20 = self.conv5_2.weight primals_21 = self.conv5_2.bias primals_22 = self.up6.weight primals_23 = self.conv6_1.weight primals_24 = self.conv6_1.bias primals_25 = self.conv6_2.weight primals_26 = self.conv6_2.bias primals_27 = self.up7.weight primals_28 = self.conv7_1.weight primals_29 = self.conv7_1.bias primals_30 = self.conv7_2.weight primals_31 = self.conv7_2.bias primals_32 = self.up8.weight primals_33 = self.conv8_1.weight primals_34 = self.conv8_1.bias primals_35 = self.conv8_2.weight primals_36 = self.conv8_2.bias primals_37 = self.up9.weight primals_38 = self.conv9_1.weight primals_39 = self.conv9_1.bias primals_40 = self.conv9_2.weight primals_41 = self.conv9_2.bias primals_42 = self.conv10.weight primals_43 = self.conv10.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43]) return output[0]
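A quick sanity check, not part of the original record: for depth_first=False the pixel_shuffle helper above should reproduce torch.nn.PixelShuffle exactly, and with block_size=2 the 12-channel head output unpacks to 3 channels at twice the spatial resolution, matching the (4, 3, 8, 8) tensor returned by the compiled graph. A minimal sketch, assuming the pixel_shuffle helper from this record is in scope:

import torch
import torch.nn as nn

x = torch.randn(1, 12, 4, 4)             # 12 = 3 * block_size**2 channels
ref = nn.PixelShuffle(2)(x)              # built-in reference implementation
out = pixel_shuffle(x, upscale_factor=2) # helper defined in this record
assert out.shape == (1, 3, 8, 8)
assert torch.equal(out, ref)             # identical element layout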
cydonia999/Learning_to_See_in_the_Dark_PyTorch
LSID
false
15,162
[ "MIT" ]
77
470a6a8e9c6367d8fa88ee6d1dea211dd9fb1f81
https://github.com/cydonia999/Learning_to_See_in_the_Dark_PyTorch/tree/470a6a8e9c6367d8fa88ee6d1dea211dd9fb1f81
HexaLinearScore
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ar/carmia3wh3a4soirr2hwld2ewi75pvcghm4jka76mzx45462dleq.py # Topologically Sorted Source Nodes: [g1], Original ATen: [aten.clone, aten._unsafe_view] # Source node to ATen node mapping: # g1 => clone, view # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_2,), kwargs = {memory_format: torch.contiguous_format}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [8, 4]), kwargs = {}) triton_poi_fused__unsafe_view_clone_0 = async_compile.triton('triton_poi_fused__unsafe_view_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex 
// 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 % 2)) + (16*(x1 // 2))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ht/chtw7yb2aeiwqf7e6wcciqo5cs7snfwq55c2zmy7756abjt5odwg.py # Topologically Sorted Source Nodes: [g2], Original ATen: [aten.clone, aten._unsafe_view] # Source node to ATen node mapping: # g2 => clone_1, view_2 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_4,), kwargs = {memory_format: torch.contiguous_format}) # %view_2 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone_1, [8, 4]), kwargs = {}) triton_poi_fused__unsafe_view_clone_1 = async_compile.triton('triton_poi_fused__unsafe_view_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (4*(x1 % 2)) + (16*(x1 // 2))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6y/c6yrrdc2nauu5gzytu6guntvth4pajjmsnsfsjwimejql5e62rme.py # Topologically Sorted Source Nodes: [g3], Original ATen: [aten.clone, aten._unsafe_view] # Source node to ATen node mapping: # g3 => clone_2, view_4 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_6,), kwargs = {memory_format: torch.contiguous_format}) # %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone_2, [8, 4]), kwargs = {}) triton_poi_fused__unsafe_view_clone_2 = async_compile.triton('triton_poi_fused__unsafe_view_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_view_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + (4*(x1 % 2)) + (16*(x1 // 2))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/u5/cu54wdpiiawrisqv7ugmuf666653hkp2zredg43wlga6qn64lsbl.py # Topologically Sorted Source Nodes: [mul, temp01], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # temp01 => mul_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_5), kwargs = {}) triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, 
XBLOCK : tl.constexpr): xnumel = 3168 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp3 = tl.load(in_ptr2 + (x0), xmask) tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/oj/cojedjn2cyrz3u5rpahykrfq5sui4tyzd4aus2aip6rvwrpymfzj.py # Topologically Sorted Source Nodes: [temp02], Original ATen: [aten.mul] # Source node to ATen node mapping: # temp02 => mul_2, mul_3 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %permute_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, %mul_2), kwargs = {}) triton_poi_fused_mul_4 = async_compile.triton('triton_poi_fused_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25344 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x5 = xindex % 1584 x0 = xindex % 396 x3 = (xindex // 6336) x2 = (xindex // 1584) % 4 x4 = (xindex // 1584) tmp0 = tl.load(in_ptr0 + (x5), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (396*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + (396*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x5 + (1600*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jy/cjyih4xrikzd7ipfrh25genasic2vujoqtxwqanglf4xqxvltqos.py # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose] # Source node to ATen node mapping: # score => bmm # Graph fragment: # %bmm : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%view_6, %view_7), kwargs = {}) # %permute_10 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_7, [0, 2, 1]), kwargs = {}) triton_poi_fused_bmm_transpose_5 
= async_compile.triton('triton_poi_fused_bmm_transpose_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_transpose_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_transpose_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 25344 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 396 x1 = (xindex // 396) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (396*(x1 % 4)) + (1600*(x1 // 4))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) tl.store(out_ptr1 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4g/c4guh5fiotlbffthmqf7v7xrn4nvwh2cb5ppnveaklu6nk2lqe3f.py # Topologically Sorted Source Nodes: [score_1], Original ATen: [aten.div] # Source node to ATen node mapping: # score_1 => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_9, 19.8997487421324), kwargs = {}) triton_poi_fused_div_6 = async_compile.triton('triton_poi_fused_div_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_6(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 0.050251890762960605 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 396), (396, 1)) assert_size_stride(primals_3, (4, 396), (396, 1)) assert_size_stride(primals_4, (4, 396), (396, 1)) assert_size_stride(primals_5, (4, 20), (20, 1)) assert_size_stride(primals_6, (20, 396), (396, 1)) assert_size_stride(primals_7, (20, 396), (396, 1)) assert_size_stride(primals_8, (20, 396), (396, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [g1], Original ATen: [aten.clone, aten._unsafe_view] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_view_clone_0.run(primals_1, buf0, 32, grid=grid(32), stream=stream0) buf1 = empty_strided_cuda((8, 396), (396, 1), torch.float32) # Topologically Sorted Source Nodes: [g1], Original ATen: [aten.mm] extern_kernels.mm(buf0, primals_2, out=buf1) del primals_2 buf2 = empty_strided_cuda((8, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [g2], Original ATen: [aten.clone, aten._unsafe_view] triton_poi_fused__unsafe_view_clone_1.run(primals_1, buf2, 32, grid=grid(32), stream=stream0) buf3 = empty_strided_cuda((8, 396), (396, 1), torch.float32) # Topologically Sorted Source Nodes: [g2], Original ATen: [aten.mm] extern_kernels.mm(buf2, primals_3, out=buf3) del primals_3 buf4 = empty_strided_cuda((8, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [g3], Original ATen: [aten.clone, aten._unsafe_view] triton_poi_fused__unsafe_view_clone_2.run(primals_1, buf4, 32, grid=grid(32), stream=stream0) del primals_1 buf5 = empty_strided_cuda((8, 396), (396, 1), torch.float32) # Topologically Sorted Source Nodes: [g3], Original ATen: [aten.mm] extern_kernels.mm(buf4, primals_4, out=buf5) del primals_4 buf6 = empty_strided_cuda((4, 396), (396, 1), torch.float32) # Topologically Sorted Source Nodes: [g4], Original ATen: [aten.mm] extern_kernels.mm(primals_5, primals_6, out=buf6) buf7 = empty_strided_cuda((4, 396), (396, 1), torch.float32) # Topologically Sorted Source Nodes: [g5], Original ATen: [aten.mm] extern_kernels.mm(primals_5, primals_7, out=buf7) buf8 = empty_strided_cuda((4, 396), (396, 1), torch.float32) # Topologically Sorted Source Nodes: [g6], Original ATen: [aten.mm] extern_kernels.mm(primals_5, primals_8, out=buf8) buf9 = empty_strided_cuda((4, 2, 396), (792, 396, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, temp01], Original ATen: [aten.mul] triton_poi_fused_mul_3.run(buf1, buf3, buf5, buf9, 3168, grid=grid(3168), stream=stream0) buf10 = empty_strided_cuda((4, 4, 4, 396), (6400, 1600, 396, 1), torch.float32) # Topologically Sorted Source Nodes: [temp02], Original ATen: [aten.mul] triton_poi_fused_mul_4.run(buf8, buf6, buf7, buf10, 25344, grid=grid(25344), stream=stream0) buf11 = 
empty_strided_cuda((1, 396, 64), (25344, 1, 396), torch.float32) buf14 = empty_strided_cuda((1, 64, 396), (25344, 396, 1), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose] triton_poi_fused_bmm_transpose_5.run(buf10, buf11, buf14, 25344, grid=grid(25344), stream=stream0) del buf10 buf12 = empty_strided_cuda((1, 8, 64), (512, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf9, (1, 8, 396), (0, 396, 1), 0), buf11, out=buf12) del buf11 buf13 = reinterpret_tensor(buf12, (4, 2, 4, 4, 4), (128, 64, 16, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [score_1], Original ATen: [aten.div] triton_poi_fused_div_6.run(buf13, 512, grid=grid(512), stream=stream0) return (buf13, buf1, buf3, buf5, buf6, buf7, buf8, reinterpret_tensor(buf9, (1, 396, 8), (3168, 1, 396), 0), buf14, reinterpret_tensor(primals_5, (20, 4), (1, 20), 0), reinterpret_tensor(primals_8, (396, 20), (1, 396), 0), reinterpret_tensor(primals_7, (396, 20), (1, 396), 0), reinterpret_tensor(primals_6, (396, 20), (1, 396), 0), reinterpret_tensor(buf4, (4, 8), (1, 4), 0), reinterpret_tensor(buf2, (4, 8), (1, 4), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 396), (396, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 396), (396, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 396), (396, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 20), (20, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((20, 396), (396, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((20, 396), (396, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((20, 396), (396, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
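One detail worth noting in the record above: the graph divides the score by sqrt(rank), annotated as 19.8997487421324 in the aten.div node, but the generated triton_poi_fused_div_6 kernel strength-reduces the division to a multiply by the reciprocal. A two-line arithmetic check, added here only for clarity:

import math

rank = 396
print(math.sqrt(rank))      # ~19.8997487421324, the divisor in the graph node
print(1 / math.sqrt(rank))  # ~0.050251890762960605, the multiplier in the kernel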
import math
import torch
import torch.nn as nn
import torch.utils.data.dataloader


class HexaLinearScore(nn.Module):
    """
    Outer product version of hexalinear function for sequence labeling.
    """

    def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396,
                 std=0.1545, normalization=True, **kwargs):
        """
        Args:
            wemb_size: word embedding hidden size
            tagset_size: tag set size
            temb_size: tag embedding size
            rank: rank of the weight tensor
            std: standard deviation of the tensor
        """
        super(HexaLinearScore, self).__init__()
        self.wemb_size = wemb_size
        self.tagset_size = tagset_size
        self.temb_size = temb_size
        self.rank = rank
        self.std = std
        self.normalization = normalization
        self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.temb_size))
        self.W1 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
        self.W2 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
        self.W3 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank))
        self.T1 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
        self.T2 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
        self.T3 = nn.Parameter(torch.Tensor(self.temb_size, self.rank))
        self.rand_init()

    def rand_init(self):
        """random initialization"""
        # Xavier-style symmetric uniform bounds for the tag embedding.
        bound = math.sqrt(6 / self.temb_size)
        nn.init.uniform_(self.tag_emd, a=-bound, b=bound)
        nn.init.normal_(self.T1, std=self.std)
        nn.init.normal_(self.T2, std=self.std)
        nn.init.normal_(self.T3, std=self.std)
        nn.init.normal_(self.W1, std=self.std)
        nn.init.normal_(self.W2, std=self.std)
        nn.init.normal_(self.W3, std=self.std)

    def forward(self, word_emb):
        """
        Args:
            word_emb: [batch, sent_length, wemb_size]
        Returns:
            Tensor [batch, sent_length - 2, tagset_size, tagset_size, tagset_size]
        """
        assert word_emb.size(2) == self.wemb_size, \
            'the last dimension of word_emb is required to equal wemb_size.'
        g1 = torch.matmul(word_emb[:, :-2], self.W1)
        g2 = torch.matmul(word_emb[:, 1:-1], self.W2)
        g3 = torch.matmul(word_emb[:, 2:], self.W3)
        g4 = torch.matmul(self.tag_emd, self.T1)
        g5 = torch.matmul(self.tag_emd, self.T2)
        g6 = torch.matmul(self.tag_emd, self.T3)
        temp01 = g1 * g2 * g3
        temp02 = torch.einsum('ak,bk,ck->abck', [g4, g5, g6])
        score = torch.einsum('nmk,abck->nmabc', [temp01, temp02])
        if self.normalization:
            score = score / math.sqrt(self.rank)
        return score


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'wemb_size': 4, 'tagset_size': 4}]
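A minimal usage sketch for the eager module above, following the shapes from get_inputs()/get_init_inputs(); the (4, 2, 4, 4, 4) result matches buf13 in the compiled graph.

import torch

layer = HexaLinearScore(wemb_size=4, tagset_size=4)
word_emb = torch.rand(4, 4, 4)   # [batch, sent_length, wemb_size]
score = layer(word_emb)
# trigram window -> sent_length - 2 positions, one tagset axis per slot
assert score.shape == (4, 2, 4, 4, 4)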
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 2) + 16 * (x1 // 2)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 4 * (x1 % 2) + 16 * (x1 // 2)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__unsafe_view_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + 4 * (x1 % 2) + 16 * (x1 // 2)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3168 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25344 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x5 = xindex % 1584 x0 = xindex % 396 x3 = xindex // 6336 x2 = xindex // 1584 % 4 x4 = xindex // 1584 tmp0 = tl.load(in_ptr0 + x5, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 396 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 396 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x5 + 1600 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_bmm_transpose_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 25344 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 396 x1 = xindex // 396 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 396 * (x1 % 4) + 1600 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) tl.store(out_ptr1 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_div_6(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.050251890762960605 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 396), (396, 1)) assert_size_stride(primals_3, (4, 396), (396, 1)) assert_size_stride(primals_4, (4, 396), (396, 1)) assert_size_stride(primals_5, (4, 20), (20, 1)) assert_size_stride(primals_6, (20, 396), (396, 1)) assert_size_stride(primals_7, (20, 396), (396, 1)) assert_size_stride(primals_8, (20, 396), (396, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_view_clone_0[grid(32)](primals_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((8, 396), (396, 1), torch.float32) extern_kernels.mm(buf0, primals_2, out=buf1) del primals_2 buf2 = empty_strided_cuda((8, 4), (4, 1), torch.float32) triton_poi_fused__unsafe_view_clone_1[grid(32)](primals_1, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((8, 396), (396, 1), torch.float32) extern_kernels.mm(buf2, primals_3, out=buf3) del primals_3 buf4 = empty_strided_cuda((8, 4), (4, 1), torch.float32) triton_poi_fused__unsafe_view_clone_2[grid(32)](primals_1, buf4, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf5 = empty_strided_cuda((8, 396), (396, 1), torch.float32) extern_kernels.mm(buf4, primals_4, out=buf5) del primals_4 buf6 = empty_strided_cuda((4, 396), (396, 1), torch.float32) extern_kernels.mm(primals_5, primals_6, out=buf6) buf7 = empty_strided_cuda((4, 396), (396, 1), torch.float32) extern_kernels.mm(primals_5, primals_7, out=buf7) buf8 = empty_strided_cuda((4, 396), (396, 1), torch.float32) extern_kernels.mm(primals_5, primals_8, out=buf8) buf9 = empty_strided_cuda((4, 2, 396), (792, 396, 1), torch.float32) triton_poi_fused_mul_3[grid(3168)](buf1, buf3, buf5, buf9, 3168, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((4, 4, 4, 396), (6400, 1600, 396, 1), torch.float32) triton_poi_fused_mul_4[grid(25344)](buf8, buf6, buf7, buf10, 25344, XBLOCK=128, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((1, 396, 64), (25344, 1, 396), torch.float32 ) buf14 = empty_strided_cuda((1, 64, 396), (25344, 396, 1), torch.float32 ) triton_poi_fused_bmm_transpose_5[grid(25344)](buf10, buf11, buf14, 25344, XBLOCK=128, num_warps=4, num_stages=1) del buf10 buf12 = empty_strided_cuda((1, 8, 64), (512, 64, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf9, (1, 8, 396), (0, 396, 1 ), 0), buf11, out=buf12) del buf11 buf13 = reinterpret_tensor(buf12, (4, 2, 4, 4, 4), (128, 64, 16, 4, 1), 0) del buf12 triton_poi_fused_div_6[grid(512)](buf13, 512, XBLOCK=256, num_warps =4, num_stages=1) return buf13, buf1, buf3, buf5, buf6, buf7, buf8, reinterpret_tensor(buf9, (1, 396, 8), (3168, 1, 396), 0), buf14, reinterpret_tensor(primals_5, (20, 4), (1, 20), 0), reinterpret_tensor(primals_8, (396, 20), (1, 396), 0), reinterpret_tensor(primals_7, (396, 20), (1, 396), 0 ), reinterpret_tensor(primals_6, (396, 20), (1, 396), 0 ), reinterpret_tensor(buf4, (4, 8), (1, 4), 0), reinterpret_tensor(buf2 , (4, 8), (1, 4), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0) class HexaLinearScoreNew(nn.Module): """ Outer product version of hexalinear function for sequence labeling. 
""" def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396, std= 0.1545, normalization=True, **kwargs): """ Args: wemb_size: word embedding hidden size tagset_size: tag set size temb_size: tag embedding size rank: rank of the weight tensor std: standard deviation of the tensor """ super(HexaLinearScoreNew, self).__init__() self.wemb_size = wemb_size self.tagset_size = tagset_size self.temb_size = temb_size self.rank = rank self.std = std self.normalization = normalization self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self. temb_size)) self.W1 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank)) self.W2 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank)) self.W3 = nn.Parameter(torch.Tensor(self.wemb_size, self.rank)) self.T1 = nn.Parameter(torch.Tensor(self.temb_size, self.rank)) self.T2 = nn.Parameter(torch.Tensor(self.temb_size, self.rank)) self.T3 = nn.Parameter(torch.Tensor(self.temb_size, self.rank)) self.rand_init() self def rand_init(self): """random initialization """ nn.init.uniform_(self.tag_emd, a=math.sqrt(6 / self.temb_size), b= math.sqrt(6 / self.temb_size)) nn.init.normal_(self.T1, std=self.std) nn.init.normal_(self.T2, std=self.std) nn.init.normal_(self.T3, std=self.std) nn.init.normal_(self.W1, std=self.std) nn.init.normal_(self.W2, std=self.std) nn.init.normal_(self.W3, std=self.std) def forward(self, input_0): primals_5 = self.tag_emd primals_2 = self.W1 primals_3 = self.W2 primals_4 = self.W3 primals_6 = self.T1 primals_7 = self.T2 primals_8 = self.T3 primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
db-bionlp/CLNER
HexaLinearScore
false
15,163
[ "MIT" ]
46
77910311acf0411252b9fea8c3e6efb7175eb21f
https://github.com/db-bionlp/CLNER/tree/77910311acf0411252b9fea8c3e6efb7175eb21f
GraphAttentionLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xd/cxdwtygncgwjegadoo262dgmykjc5ooocufkdnt5on6exr5afkek.py # Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm] # Source node to ATen node mapping: # norm => pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) triton_poi_fused_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xo/cxosd6rjujtkwqu2aqnwcgad4pzczk77e24k3flwrt5d5ujbwzwg.py # Topologically Sorted Source Nodes: [add, div], Original ATen: [aten.add, aten.div] # Source node to ATen node mapping: # add => add # div => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, 1e-07), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm, %add), kwargs = {}) triton_poi_fused_add_div_1 = async_compile.triton('triton_poi_fused_add_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tmp0 / tmp3 tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zs/czs4pm2sld6srpjj2hn34get4x3mh33uxlbbg3ci35mcxkqhrfyw.py # Topologically Sorted Source Nodes: [cos, sub, mask, masked, P], Original ATen: [aten.mul, aten.rsub, aten.add, aten._softmax] # Source node to ATen node mapping: # P => amax, exp, sub_1, sum_2 # cos => mul # mask => mul_1 # masked => add_1 # sub => sub # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = 
(1.0, %primals_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -1000000000.0), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_1, [1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) triton_poi_fused__softmax_add_mul_rsub_2 = async_compile.triton('triton_poi_fused__softmax_add_mul_rsub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_rsub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = -1000000000.0 tmp8 = tmp6 * tmp7 tmp9 = tmp3 + tmp8 tmp11 = tmp1 * tmp10 tmp13 = tmp5 - tmp12 tmp14 = tmp13 * tmp7 tmp15 = tmp11 + tmp14 tmp16 = triton_helpers.maximum(tmp9, tmp15) tmp18 = tmp1 * tmp17 tmp20 = tmp5 - tmp19 tmp21 = tmp20 * tmp7 tmp22 = tmp18 + tmp21 tmp23 = triton_helpers.maximum(tmp16, 
tmp22) tmp25 = tmp1 * tmp24 tmp27 = tmp5 - tmp26 tmp28 = tmp27 * tmp7 tmp29 = tmp25 + tmp28 tmp30 = triton_helpers.maximum(tmp23, tmp29) tmp31 = tmp9 - tmp30 tmp32 = tl_math.exp(tmp31) tmp33 = tmp15 - tmp30 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tmp36 = tmp22 - tmp30 tmp37 = tl_math.exp(tmp36) tmp38 = tmp35 + tmp37 tmp39 = tmp29 - tmp30 tmp40 = tl_math.exp(tmp39) tmp41 = tmp38 + tmp40 tl.store(out_ptr0 + (x0), tmp30, xmask) tl.store(out_ptr1 + (x0), tmp41, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bo/cbosarbry766an4rzjlpfxxokwi3hn3gmqpta34tpdyh6jotaz6u.py # Topologically Sorted Source Nodes: [cos, sub, mask, masked, P], Original ATen: [aten.mul, aten.rsub, aten.add, aten._softmax] # Source node to ATen node mapping: # P => div_1, exp, sub_1 # cos => mul # mask => mul_1 # masked => add_1 # sub => sub # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %primals_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -1000000000.0), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) triton_poi_fused__softmax_add_mul_rsub_3 = async_compile.triton('triton_poi_fused__softmax_add_mul_rsub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_rsub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_mul_rsub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (0)) tmp1 = 
tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x2), xmask) tmp10 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = -1000000000.0 tmp8 = tmp6 * tmp7 tmp9 = tmp3 + tmp8 tmp11 = tmp9 - tmp10 tmp12 = tl_math.exp(tmp11) tmp14 = tmp12 / tmp13 tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm] stream0 = get_raw_stream(0) triton_poi_fused_linalg_vector_norm_0.run(primals_1, buf0, 4, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(primals_1, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (4, 1), (1, 0), 0), reinterpret_tensor(buf0, (1, 4), (0, 1), 0), out=buf2) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [add, div], Original ATen: [aten.add, aten.div] triton_poi_fused_add_div_1.run(buf3, buf2, 16, grid=grid(16), stream=stream0) buf4 = reinterpret_tensor(buf0, (4, 1), (1, 4), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [cos, sub, mask, masked, P], Original ATen: [aten.mul, aten.rsub, aten.add, aten._softmax] triton_poi_fused__softmax_add_mul_rsub_2.run(primals_2, buf3, primals_3, buf4, buf5, 4, grid=grid(4), stream=stream0) buf6 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [cos, sub, mask, masked, P], Original ATen: [aten.mul, aten.rsub, aten.add, aten._softmax] triton_poi_fused__softmax_add_mul_rsub_3.run(primals_2, buf3, primals_3, buf4, buf5, buf6, 16, grid=grid(16), stream=stream0) del buf4 del buf5 del primals_2 del primals_3 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm] extern_kernels.mm(buf6, primals_1, out=buf7) return (buf7, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), buf3, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
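The two fused softmax kernels in the record above implement the usual numerically stable split: triton_poi_fused__softmax_add_mul_rsub_2 computes the rowwise max and the sum of shifted exponentials, and ..._3 normalizes each element. A short eager sketch of the same computation (illustrative only, not from the source):

import torch

cos = torch.randn(4, 4)
adj = (torch.rand(4, 4) > 0.5).float()
masked = cos + (1.0 - adj) * -1000000000.0         # same masking as the record
row_max = masked.max(dim=1, keepdim=True).values   # first pass: amax
exp = (masked - row_max).exp()                     # shift before exp for stability
P = exp / exp.sum(dim=1, keepdim=True)             # second pass: normalize
assert torch.allclose(P, torch.softmax(masked, dim=1))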
import torch import torch.nn as nn from torch.nn.parameter import Parameter from torch.autograd import Variable import torch.nn.functional as F class GraphAttentionLayer(nn.Module): def __init__(self, requires_grad=True): super(GraphAttentionLayer, self).__init__() if requires_grad: self.beta = Parameter(torch.Tensor(1).uniform_(0, 1), requires_grad=requires_grad) else: self.beta = Variable(torch.zeros(1), requires_grad=requires_grad) def forward(self, x, adj): norm2 = torch.norm(x, 2, 1).view(-1, 1) cos = self.beta * torch.div(torch.mm(x, x.t()), torch.mm(norm2, norm2.t()) + 1e-07) mask = (1.0 - adj) * -1000000000.0 masked = cos + mask P = F.softmax(masked, dim=1) output = torch.mm(P, x) return output def __repr__(self): return self.__class__.__name__ + ' (16 -> 16)' def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
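# --- Illustrative usage sketch (not part of the original source). It drives the
# eager module above with the shapes from get_inputs(); the random adjacency is
# purely for demonstration, with self-loops kept so every row of the mask has
# at least one valid entry.
if __name__ == '__main__':
    layer = GraphAttentionLayer(requires_grad=True)
    x = torch.rand(4, 4)
    adj = torch.bernoulli(torch.full((4, 4), 0.5))
    adj.fill_diagonal_(1.0)
    out = layer(x, adj)
    print(out.shape)  # torch.Size([4, 4]): attention-weighted mixture of the rows of x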
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.nn.parameter import Parameter from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused_add_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = 1e-07 tmp3 = tmp1 + tmp2 tmp4 = tmp0 / tmp3 tl.store(in_out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp1 * tmp2 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = -1000000000.0 tmp8 = tmp6 * tmp7 tmp9 = tmp3 + tmp8 tmp11 = tmp1 * tmp10 tmp13 = tmp5 - tmp12 tmp14 = tmp13 * tmp7 tmp15 = tmp11 + tmp14 tmp16 = triton_helpers.maximum(tmp9, tmp15) tmp18 = tmp1 * tmp17 tmp20 = tmp5 - tmp19 tmp21 = tmp20 * tmp7 tmp22 = tmp18 + tmp21 tmp23 = triton_helpers.maximum(tmp16, tmp22) tmp25 = tmp1 * tmp24 tmp27 = tmp5 - tmp26 tmp28 = tmp27 * tmp7 tmp29 = tmp25 + tmp28 tmp30 = triton_helpers.maximum(tmp23, tmp29) tmp31 = tmp9 - tmp30 tmp32 = tl_math.exp(tmp31) tmp33 = tmp15 - tmp30 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tmp36 = tmp22 - tmp30 tmp37 = tl_math.exp(tmp36) tmp38 = tmp35 + tmp37 tmp39 = tmp29 - tmp30 tmp40 = tl_math.exp(tmp39) tmp41 = tmp38 + tmp40 tl.store(out_ptr0 + x0, tmp30, xmask) tl.store(out_ptr1 + x0, tmp41, xmask) 
@triton.jit def triton_poi_fused__softmax_add_mul_rsub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x2, xmask) tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = -1000000000.0 tmp8 = tmp6 * tmp7 tmp9 = tmp3 + tmp8 tmp11 = tmp9 - tmp10 tmp12 = tl_math.exp(tmp11) tmp14 = tmp12 / tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_linalg_vector_norm_0[grid(4)](primals_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (4, 1), (1, 0), 0), reinterpret_tensor(buf0, (1, 4), (0, 1), 0), out=buf2) buf3 = buf1 del buf1 triton_poi_fused_add_div_1[grid(16)](buf3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf0, (4, 1), (1, 4), 0) del buf0 buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused__softmax_add_mul_rsub_2[grid(4)](primals_2, buf3, primals_3, buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = buf2 del buf2 triton_poi_fused__softmax_add_mul_rsub_3[grid(16)](primals_2, buf3, primals_3, buf4, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 del buf5 del primals_2 del primals_3 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf6, primals_1, out=buf7) return buf7, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), buf3, buf6 class GraphAttentionLayerNew(nn.Module): def __init__(self, requires_grad=True): super(GraphAttentionLayerNew, self).__init__() if requires_grad: self.beta = Parameter(torch.Tensor(1).uniform_(0, 1), requires_grad=requires_grad) else: self.beta = Variable(torch.zeros(1), requires_grad=requires_grad) def __repr__(self): return self.__class__.__name__ + ' (16 -> 16)' def forward(self, input_0, input_1): primals_2 = self.beta primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
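# --- Illustrative parity check (not part of the generated file; assumes a CUDA
# device, since call() allocates CUDA buffers and asserts exact strides). It
# recomputes the eager forward inline and compares it with the compiled
# wrapper; the 1e-05 tolerance is a guess for fp32 round-off between the fused
# kernels and eager ops.
def _check_graph_attention_parity():
    import torch.nn.functional as F
    torch.manual_seed(0)
    compiled = GraphAttentionLayerNew().cuda()
    x = torch.rand(4, 4, device='cuda')
    adj = torch.ones(4, 4, device='cuda')
    with torch.no_grad():
        norm2 = torch.norm(x, 2, 1).view(-1, 1)
        cos = compiled.beta * torch.div(torch.mm(x, x.t()),
                                        torch.mm(norm2, norm2.t()) + 1e-07)
        ref = torch.mm(F.softmax(cos + (1.0 - adj) * -1000000000.0, dim=1), x)
        assert torch.allclose(compiled(x, adj), ref, atol=1e-05)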
dawnranger/pytorch-AGNN
GraphAttentionLayer
false
15164
[ "MIT" ]
137
461f71b45e5eaddb50cff31a537b06cb1a50ba8f
https://github.com/dawnranger/pytorch-AGNN/tree/461f71b45e5eaddb50cff31a537b06cb1a50ba8f
QuadriLinearScore
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/v3/cv3ybrq4mz3m4ysotv2pijcf4hwcak3rq4zzuh4c7ra25ls6d2f2.py # Topologically Sorted Source Nodes: [g0], Original ATen: [aten.clone, aten._unsafe_view] # Source node to ATen node mapping: # g0 => clone, view # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_2,), kwargs = {memory_format: torch.contiguous_format}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [12, 4]), kwargs = {}) triton_poi_fused__unsafe_view_clone_0 = async_compile.triton('triton_poi_fused__unsafe_view_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex 
// 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 % 3)) + (16*(x1 // 3))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uy/cuyr6zgmv4trebzrxzaaydiawo5g5pt6xgkahl4nfpc4kmvsr4wb.py # Topologically Sorted Source Nodes: [g1], Original ATen: [aten.clone, aten._unsafe_view] # Source node to ATen node mapping: # g1 => clone_1, view_2 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_4,), kwargs = {memory_format: torch.contiguous_format}) # %view_2 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone_1, [12, 4]), kwargs = {}) triton_poi_fused__unsafe_view_clone_1 = async_compile.triton('triton_poi_fused__unsafe_view_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (4*(x1 % 3)) + (16*(x1 // 3))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ae/cae5qllyrn5trsejvmxiqy5ctiy3vsb3lfas6nhktsovuegggrry.py # Topologically Sorted Source Nodes: [temp012], Original ATen: [aten.mul] # Source node to ATen node mapping: # temp012 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %permute_1), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 
'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 19008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 396 x2 = (xindex // 1584) x3 = xindex % 1584 tmp0 = tl.load(in_ptr0 + (x0 + (396*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (396*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x3 + (1600*x2)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ts/ctstdurw7bc4bbvnxunt3mgwrvxsegqqit5fbbae7olo7trhm3vv.py # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose] # Source node to ATen node mapping: # score => bmm # Graph fragment: # %bmm : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%view_4, %view_5), kwargs = {}) # %permute_8 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_4, [0, 2, 1]), kwargs = {}) triton_poi_fused_bmm_transpose_3 = async_compile.triton('triton_poi_fused_bmm_transpose_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_transpose_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_bmm_transpose_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 19008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 396 x1 = (xindex // 396) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (396*(x1 % 4)) + (1600*(x1 // 4))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) tl.store(out_ptr1 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/e2/ce2vbtsfqo5opo5oc54myuy2kkysz6rz4zvpqo3jrsw43ldfwvz4.py # Topologically Sorted Source Nodes: [score_1], Original ATen: [aten.div] # Source node to ATen node mapping: # score_1 => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_7, 19.8997487421324), kwargs = {}) triton_poi_fused_div_4 = async_compile.triton('triton_poi_fused_div_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_4(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 0.050251890762960605 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 396), (396, 1)) assert_size_stride(primals_3, (4, 396), (396, 1)) assert_size_stride(primals_4, (4, 20), (20, 1)) assert_size_stride(primals_5, (20, 396), (396, 1)) assert_size_stride(primals_6, (20, 396), (396, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((12, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [g0], Original ATen: [aten.clone, aten._unsafe_view] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_view_clone_0.run(primals_1, buf0, 48, grid=grid(48), stream=stream0) buf1 = empty_strided_cuda((12, 396), (396, 1), torch.float32) # Topologically Sorted Source Nodes: [g0], Original ATen: [aten.mm] 
extern_kernels.mm(buf0, primals_2, out=buf1) del primals_2 buf2 = empty_strided_cuda((12, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [g1], Original ATen: [aten.clone, aten._unsafe_view] triton_poi_fused__unsafe_view_clone_1.run(primals_1, buf2, 48, grid=grid(48), stream=stream0) del primals_1 buf3 = empty_strided_cuda((12, 396), (396, 1), torch.float32) # Topologically Sorted Source Nodes: [g1], Original ATen: [aten.mm] extern_kernels.mm(buf2, primals_3, out=buf3) del primals_3 buf4 = empty_strided_cuda((4, 396), (396, 1), torch.float32) # Topologically Sorted Source Nodes: [g2], Original ATen: [aten.mm] extern_kernels.mm(primals_4, primals_5, out=buf4) buf5 = empty_strided_cuda((4, 396), (396, 1), torch.float32) # Topologically Sorted Source Nodes: [g3], Original ATen: [aten.mm] extern_kernels.mm(primals_4, primals_6, out=buf5) buf6 = empty_strided_cuda((4, 3, 4, 396), (4800, 1600, 396, 1), torch.float32) # Topologically Sorted Source Nodes: [temp012], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf1, buf3, buf4, buf6, 19008, grid=grid(19008), stream=stream0) buf7 = empty_strided_cuda((1, 48, 396), (19008, 396, 1), torch.float32) buf10 = empty_strided_cuda((1, 396, 48), (19008, 1, 396), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose] triton_poi_fused_bmm_transpose_3.run(buf6, buf7, buf10, 19008, grid=grid(19008), stream=stream0) del buf6 buf8 = empty_strided_cuda((1, 48, 4), (192, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf5, (1, 396, 4), (0, 1, 396), 0), out=buf8) del buf7 buf9 = reinterpret_tensor(buf8, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [score_1], Original ATen: [aten.div] triton_poi_fused_div_4.run(buf9, 192, grid=grid(192), stream=stream0) return (buf9, buf1, buf3, buf4, buf10, reinterpret_tensor(buf5, (1, 4, 396), (396, 396, 1), 0), reinterpret_tensor(primals_4, (20, 4), (1, 20), 0), reinterpret_tensor(primals_6, (396, 20), (1, 396), 0), reinterpret_tensor(primals_5, (396, 20), (1, 396), 0), reinterpret_tensor(buf2, (4, 12), (1, 4), 0), reinterpret_tensor(buf0, (4, 12), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 396), (396, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 396), (396, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 20), (20, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((20, 396), (396, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((20, 396), (396, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
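# --- Illustrative note (not part of the generated output): with rank=396,
# Inductor folds `score / math.sqrt(396)` into a multiply by the reciprocal,
# which is where both literals above (19.8997487421324 in the graph fragment
# and 0.050251890762960605 in triton_poi_fused_div_4) come from:
import math
assert abs(math.sqrt(396) - 19.8997487421324) < 1e-12
assert abs(1.0 / math.sqrt(396) - 0.050251890762960605) < 1e-15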
import math import torch import torch.nn as nn import torch.utils.data.dataloader import torch.nn class QuadriLinearScore(nn.Module): """ Outer product version of quadrilinear function for sequence labeling. """ def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396, std=0.1545, window_size=1, normalization=True, **kwargs): """ Args: wemb_size: word embedding hidden size tagset_size: tag set size temb_size: tag embedding size rank: rank of the weight tensor std: standard deviation of the tensor """ super(QuadriLinearScore, self).__init__() self.wemb_size = wemb_size self.tagset_size = tagset_size self.temb_size = temb_size self.rank = rank self.std = std self.window_size = window_size self.normalization = normalization self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.temb_size)) self.T = nn.Parameter(torch.Tensor(self.wemb_size, self.rank)) self.U = nn.Parameter(torch.Tensor(self.wemb_size, self.rank)) self.V = nn.Parameter(torch.Tensor(self.temb_size, self.rank)) self.W = nn.Parameter(torch.Tensor(self.temb_size, self.rank)) self.rand_init() def rand_init(self): """random initialization """ nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size), b=math.sqrt(6 / self.temb_size)) nn.init.normal_(self.T, std=self.std) nn.init.normal_(self.U, std=self.std) nn.init.normal_(self.V, std=self.std) nn.init.normal_(self.W, std=self.std) def forward(self, word_emb): """ Args: word_emb: [batch, sent_length, wemb_size] Returns: Tensor [batch, sent_length-window_size, tagset_size, tagset_size] """ assert word_emb.size(2) == self.wemb_size, 'word embedding size of the input must match wemb_size.' g0 = torch.matmul(word_emb[:, :-self.window_size], self.U) g1 = torch.matmul(word_emb[:, self.window_size:], self.T) g2 = torch.matmul(self.tag_emd, self.V) g3 = torch.matmul(self.tag_emd, self.W) temp01 = g0 * g1 temp012 = torch.einsum('nak,bk->nabk', [temp01, g2]) score = torch.einsum('nabk,ck->nabc', [temp012, g3]) if self.normalization: score = score / math.sqrt(self.rank) return score def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'wemb_size': 4, 'tagset_size': 4}]
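# --- Illustrative sketch (not part of the original source): the two chained
# einsums in forward() amount to one joint contraction over the shared rank
# axis k, which is the decomposition the fused kernels above exploit. Sizes
# here are arbitrary small values.
def _check_einsum_decomposition():
    n, a, b, c, k = 2, 3, 4, 4, 5
    temp01 = torch.rand(n, a, k)
    g2 = torch.rand(b, k)
    g3 = torch.rand(c, k)
    step = torch.einsum('nabk,ck->nabc',
                        torch.einsum('nak,bk->nabk', temp01, g2), g3)
    joint = torch.einsum('nak,bk,ck->nabc', temp01, g2, g3)
    assert torch.allclose(step, joint, atol=1e-06)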
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 3) + 16 * (x1 // 3)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 4 * (x1 % 3) + 16 * (x1 // 3)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 19008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 396 x2 = xindex // 1584 x3 = xindex % 1584 tmp0 = tl.load(in_ptr0 + (x0 + 396 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 396 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x3 + 1600 * x2), tmp4, xmask) @triton.jit def triton_poi_fused_bmm_transpose_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 19008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 396 x1 = xindex // 396 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 396 * (x1 % 4) + 1600 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) tl.store(out_ptr1 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_div_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.050251890762960605 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 396), (396, 1)) assert_size_stride(primals_3, (4, 396), (396, 1)) assert_size_stride(primals_4, (4, 20), (20, 1)) assert_size_stride(primals_5, (20, 396), (396, 1)) assert_size_stride(primals_6, (20, 396), (396, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((12, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_view_clone_0[grid(48)](primals_1, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((12, 396), (396, 1), torch.float32) extern_kernels.mm(buf0, primals_2, out=buf1) del primals_2 buf2 = empty_strided_cuda((12, 4), (4, 1), torch.float32) triton_poi_fused__unsafe_view_clone_1[grid(48)](primals_1, buf2, 48, XBLOCK=64, num_warps=1, num_stages=1) 
del primals_1 buf3 = empty_strided_cuda((12, 396), (396, 1), torch.float32) extern_kernels.mm(buf2, primals_3, out=buf3) del primals_3 buf4 = empty_strided_cuda((4, 396), (396, 1), torch.float32) extern_kernels.mm(primals_4, primals_5, out=buf4) buf5 = empty_strided_cuda((4, 396), (396, 1), torch.float32) extern_kernels.mm(primals_4, primals_6, out=buf5) buf6 = empty_strided_cuda((4, 3, 4, 396), (4800, 1600, 396, 1), torch.float32) triton_poi_fused_mul_2[grid(19008)](buf1, buf3, buf4, buf6, 19008, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((1, 48, 396), (19008, 396, 1), torch.float32) buf10 = empty_strided_cuda((1, 396, 48), (19008, 1, 396), torch.float32) triton_poi_fused_bmm_transpose_3[grid(19008)](buf6, buf7, buf10, 19008, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = empty_strided_cuda((1, 48, 4), (192, 4, 1), torch.float32) extern_kernels.bmm(buf7, reinterpret_tensor(buf5, (1, 396, 4), (0, 1, 396), 0), out=buf8) del buf7 buf9 = reinterpret_tensor(buf8, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf8 triton_poi_fused_div_4[grid(192)](buf9, 192, XBLOCK=256, num_warps=4, num_stages=1) return buf9, buf1, buf3, buf4, buf10, reinterpret_tensor(buf5, (1, 4, 396), (396, 396, 1), 0), reinterpret_tensor(primals_4, (20, 4), (1, 20), 0), reinterpret_tensor(primals_6, (396, 20), (1, 396), 0), reinterpret_tensor(primals_5, (396, 20), (1, 396), 0), reinterpret_tensor(buf2, (4, 12), (1, 4), 0), reinterpret_tensor(buf0, (4, 12), (1, 4), 0) class QuadriLinearScoreNew(nn.Module): """ Outer product version of quadrilinear function for sequence labeling. """ def __init__(self, wemb_size, tagset_size, temb_size=20, rank=396, std=0.1545, window_size=1, normalization=True, **kwargs): """ Args: wemb_size: word embedding hidden size tagset_size: tag set size temb_size: tag embedding size rank: rank of the weight tensor std: standard deviation of the tensor """ super(QuadriLinearScoreNew, self).__init__() self.wemb_size = wemb_size self.tagset_size = tagset_size self.temb_size = temb_size self.rank = rank self.std = std self.window_size = window_size self.normalization = normalization self.tag_emd = nn.Parameter(torch.Tensor(self.tagset_size, self.temb_size)) self.T = nn.Parameter(torch.Tensor(self.wemb_size, self.rank)) self.U = nn.Parameter(torch.Tensor(self.wemb_size, self.rank)) self.V = nn.Parameter(torch.Tensor(self.temb_size, self.rank)) self.W = nn.Parameter(torch.Tensor(self.temb_size, self.rank)) self.rand_init() def rand_init(self): """random initialization """ nn.init.uniform_(self.tag_emd, a=-math.sqrt(6 / self.temb_size), b=math.sqrt(6 / self.temb_size)) nn.init.normal_(self.T, std=self.std) nn.init.normal_(self.U, std=self.std) nn.init.normal_(self.V, std=self.std) nn.init.normal_(self.W, std=self.std) def forward(self, input_0): primals_4 = self.tag_emd primals_2 = self.U primals_3 = self.T primals_5 = self.V primals_6 = self.W primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
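# --- Illustrative CUDA parity check (not part of the generated file; assumes a
# CUDA device). It compares the compiled wrapper against the eager formula from
# the reference source above. In the captured graph the mm tagged [g0]
# multiplies the left slice by primals_2, and eager g0 uses self.U, so the
# wrapper binds primals_2 = self.U. call() returns the score first, followed by
# tensors saved for backward, hence forward() surfaces only output[0]. The
# tolerance is a guess for fp32 round-off.
def _check_quadrilinear_parity():
    torch.manual_seed(0)
    compiled = QuadriLinearScoreNew(wemb_size=4, tagset_size=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    with torch.no_grad():
        g0 = torch.matmul(x[:, :-1], compiled.U)   # window_size defaults to 1
        g1 = torch.matmul(x[:, 1:], compiled.T)
        g2 = torch.matmul(compiled.tag_emd, compiled.V)
        g3 = torch.matmul(compiled.tag_emd, compiled.W)
        ref = torch.einsum('nak,bk,ck->nabc', g0 * g1, g2, g3) / math.sqrt(396)
        assert torch.allclose(compiled(x), ref, atol=1e-04)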
db-bionlp/CLNER
QuadriLinearScore
false
15165
[ "MIT" ]
46
77910311acf0411252b9fea8c3e6efb7175eb21f
https://github.com/db-bionlp/CLNER/tree/77910311acf0411252b9fea8c3e6efb7175eb21f
DecoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6s/c6sstbvcita246hkfqwdeatnmsh3e6vlcncrzcwlsoqg7dmxvabp.py # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x2 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset 
= tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zv/czv3tzezwxkylzsgkrivaldxprnr7tvjr5iihe4mbc7bzdev5lsj.py # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x2 => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xh/cxh3jsabrreygmtqbioiwu4irvko2pq4pftuu4swcr54wp3ipunn.py # Topologically Sorted Source Nodes: [scores_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores_1 => div_1, exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask 
= xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = tmp3 * tmp1 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5 / tmp5 tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dv/cdvnfolre5t45bhprto4yywwjbghf5vuy2hz7zcisvjh3svypamc.py # Topologically Sorted Source Nodes: [sum_1, att_weights], Original ATen: [aten.sum, aten.div] # Source node to ATen node mapping: # att_weights => div_2 # sum_1 => sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%slice_2, [1]), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 2), kwargs = {}) triton_poi_fused_div_sum_3 = async_compile.triton('triton_poi_fused_div_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mt/cmtuc2xel6zxbpo36z3ucsy35hwatmen2g4hgvskoz2avgfqjnhs.py # Topologically Sorted Source Nodes: [x, x2_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # x => add_2 # x2_1 => var_mean_1 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_11), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics 
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) tl.store(out_ptr1 + (x2), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/j6/cj6a5nxffd4gwkujroxv5rcat5smuf5lcr73lidagq4kp5rnoord.py # Topologically Sorted Source Nodes: [x, x2_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # x => add_2 # x2_1 => add_3, add_4, mul_2, mul_3, rsqrt_1, sub_2 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_11), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = 
(%sub_2, %rsqrt_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_12), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_13), kwargs = {}) triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x2 = (xindex // 16) x4 = (xindex // 4) x5 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x5), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7y/c7y56bfcp3tbow6bnl5mnl3pgzgiplulza2aqzgubrrb2fm6djiq.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_13,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_relu_threshold_backward_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/f7/cf7imvrhlanblem72bj73lqt7qawtllyui6culwvyex56wp6aysd.py # Topologically Sorted Source Nodes: [x, x_3], Original ATen: [aten.add] # Source node to ATen node mapping: # x => add_2 # x_3 => add_5 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_11), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_15), kwargs = {}) triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr0 + (x4), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (2048, 4), (4, 1)) assert_size_stride(primals_15, (2048, ), (1, )) assert_size_stride(primals_16, (4, 2048), (2048, 1)) assert_size_stride(primals_17, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 4, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 16, grid=grid(16), stream=stream0) del buf0 del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, 
beta=1, out=buf5) del primals_9 buf6 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 1), (1, 1, 1), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [scores_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf7, 16, grid=grid(16), stream=stream0) buf8 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf5, (16, 1, 1), (1, 1, 1), 0), out=buf8) buf9 = reinterpret_tensor(buf1, (4, 1, 1), (1, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [sum_1, att_weights], Original ATen: [aten.sum, aten.div] triton_poi_fused_div_sum_3.run(buf7, buf9, 4, grid=grid(4), stream=stream0) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_11 buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [x, x2_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_4.run(primals_3, buf10, buf11, buf12, 16, grid=grid(16), stream=stream0) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x2_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(primals_3, buf10, buf11, buf12, primals_12, primals_13, buf13, 64, grid=grid(64), stream=stream0) del buf11 del buf12 del primals_13 buf14 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 2048), (1, 4), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 2048), (8192, 2048, 1), 0); del buf14 # reuse buf18 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_6.run(buf15, primals_15, buf18, 32768, grid=grid(32768), stream=stream0) del primals_15 buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf15, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_16, (2048, 4), (1, 2048), 0), out=buf16) buf17 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0); del buf16 # reuse # Topologically Sorted Source Nodes: [x, x_3], Original ATen: [aten.add] triton_poi_fused_add_7.run(buf17, primals_3, buf10, primals_17, 64, grid=grid(64), stream=stream0) del primals_17 return (buf17, buf9, primals_3, primals_12, buf2, buf7, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(buf15, (16, 2048), (2048, 1), 0), primals_16, buf18, primals_14, primals_10, reinterpret_tensor(buf5, (16, 1, 1), (1, 1, 4), 0), reinterpret_tensor(buf4, (16, 1, 1), (1, 1, 4), 0), 
reinterpret_tensor(buf3, (16, 1, 1), (1, 4, 1), 0), primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch.nn.functional as F def attention(q, k, v, d_k, mask=None, dropout=None): """ :param q: queries, B x N_HEADS x seq_len x d_k :param k: keys, same dim as q :param v: values, same dim as q :param d_k: d_model/n_heads = 128/8 = 16 :param mask: mask for padding and future steps in the scores! :param dropout: dropout layer if any :return: attention vector of shape B x N_HEADS x seq_len x d_k """ scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask == 0, -1000000000.0) scores = F.softmax(scores, dim=-1) if dropout is not None: scores = dropout(scores) output = torch.matmul(scores, v) return output, scores class FeedForward(nn.Module): def __init__(self, d_model, d_ff=2048, dropout=0.0): super().__init__() self.linear_1 = nn.Linear(d_model, d_ff) self.dropout = nn.Dropout(dropout) self.linear_2 = nn.Linear(d_ff, d_model) def forward(self, x): x = self.dropout(F.relu(self.linear_1(x))) x = self.linear_2(x) return x class MultiHeadAttention(nn.Module): def __init__(self, heads, d_model, dropout=0.0): super().__init__() self.d_model = d_model self.d_k = d_model // heads self.h = heads self.q_linear = nn.Linear(d_model, d_model) self.v_linear = nn.Linear(d_model, d_model) self.k_linear = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(dropout) if dropout > 0 else None self.out = nn.Linear(d_model, d_model) def forward(self, q, k, v, mask=None): bs = q.size(0) k = self.k_linear(k).view(bs, -1, self.h, self.d_k) q = self.q_linear(q).view(bs, -1, self.h, self.d_k) v = self.v_linear(v).view(bs, -1, self.h, self.d_k) k = k.transpose(1, 2) q = q.transpose(1, 2) v = v.transpose(1, 2) att_output, att_weights = attention(q, k, v, self.d_k, mask, self.dropout) att_weights = att_weights.detach()[:, -2:].sum(dim=1) / 2 concat = att_output.transpose(1, 2).contiguous().view(bs, -1, self.d_model) output = self.out(concat) return output, att_weights class DecoderLayer(nn.Module): def __init__(self, d_model, heads, dropout=0.1): super().__init__() self.norm_1 = nn.LayerNorm(d_model) self.norm_2 = nn.LayerNorm(d_model) self.dropout_1 = nn.Dropout(dropout) self.dropout_2 = nn.Dropout(dropout) self.attn_1 = MultiHeadAttention(heads, d_model, dropout=dropout) self.ff = FeedForward(d_model) def forward(self, x, mask=None): x2 = self.norm_1(x) t, avg_scores = self.attn_1(x2, x2, x2, mask) x = x + self.dropout_1(t) x2 = self.norm_2(x) x = x + self.dropout_2(self.ff(x2)) return x, avg_scores def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'heads': 4}]
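Editorial note on the lowering below (a sketch, not part of the dataset row): because d_model // heads == 1 in this configuration, each head attends over a single key, so triton_poi_fused__softmax_2 degenerates to exp(x - x) / exp(x - x) == 1.0, and triton_poi_fused_div_sum_3 only has to average the detached scores of the last two heads, mirroring att_weights = att_weights.detach()[:, -2:].sum(dim=1) / 2 from MultiHeadAttention above. A plain-PyTorch reference for that reduction, assuming scores of shape (batch, heads, seq, seq):

import torch

def avg_last_two_heads(scores: torch.Tensor) -> torch.Tensor:
    # Average the attention maps of the final two heads, as MultiHeadAttention does.
    return scores.detach()[:, -2:].sum(dim=1) / 2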
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = tmp3 * tmp1 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5 / tmp5 tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, 
eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex // 4 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x5, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_out_ptr0 + x4, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x4, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) 
assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (2048, 4), (4, 1)) assert_size_stride(primals_15, (2048,), (1,)) assert_size_stride(primals_16, (4, 2048), (2048, 1)) assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](primals_3, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_9 buf6 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 1), (1, 1, 1), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf6 triton_poi_fused__softmax_2[grid(16)](buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf5, (16, 1, 1), (1, 1, 1), 0), out=buf8) buf9 = reinterpret_tensor(buf1, (4, 1, 1), (1, 1, 1), 0) del buf1 triton_poi_fused_div_sum_3[grid(4)](buf7, buf9, 4, XBLOCK=4, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf8, (4, 4), ( 4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_11 buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(16)](primals_3, buf10, buf11, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(64)](primals_3, buf10, buf11, buf12, primals_12, primals_13, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf11 del buf12 del primals_13 buf14 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 2048), (1, 4), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 2048), (8192, 2048, 1), 0) del buf14 buf18 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), 
torch.bool) triton_poi_fused_relu_threshold_backward_6[grid(32768)](buf15, primals_15, buf18, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf15, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_16, (2048, 4), (1, 2048), 0), out=buf16) buf17 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0) del buf16 triton_poi_fused_add_7[grid(64)](buf17, primals_3, buf10, primals_17, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_17 return buf17, buf9, primals_3, primals_12, buf2, buf7, reinterpret_tensor( buf8, (4, 4), (4, 1), 0), buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(buf15, (16, 2048), (2048, 1), 0 ), primals_16, buf18, primals_14, primals_10, reinterpret_tensor(buf5, (16, 1, 1), (1, 1, 4), 0), reinterpret_tensor(buf4, (16, 1, 1), (1, 1, 4), 0), reinterpret_tensor(buf3, (16, 1, 1), (1, 4, 1), 0 ), primals_8, primals_6, primals_4 def attention(q, k, v, d_k, mask=None, dropout=None): """ :param q: queries, B x N_HEADS x seq_len x d_k :param k: keys, same dim as q :param v: values, same dim as q :param d_k: d_model/n_heads = 128/8 = 16 :param mask: mask for padding and future steps in the scores! :param dropout: dropout layer if any :return: attention vector of shape B x N_HEADS x seq_len x d_k """ scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask == 0, -1000000000.0) scores = F.softmax(scores, dim=-1) if dropout is not None: scores = dropout(scores) output = torch.matmul(scores, v) return output, scores class FeedForward(nn.Module): def __init__(self, d_model, d_ff=2048, dropout=0.0): super().__init__() self.linear_1 = nn.Linear(d_model, d_ff) self.dropout = nn.Dropout(dropout) self.linear_2 = nn.Linear(d_ff, d_model) def forward(self, x): x = self.dropout(F.relu(self.linear_1(x))) x = self.linear_2(x) return x class MultiHeadAttention(nn.Module): def __init__(self, heads, d_model, dropout=0.0): super().__init__() self.d_model = d_model self.d_k = d_model // heads self.h = heads self.q_linear = nn.Linear(d_model, d_model) self.v_linear = nn.Linear(d_model, d_model) self.k_linear = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(dropout) if dropout > 0 else None self.out = nn.Linear(d_model, d_model) def forward(self, q, k, v, mask=None): bs = q.size(0) k = self.k_linear(k).view(bs, -1, self.h, self.d_k) q = self.q_linear(q).view(bs, -1, self.h, self.d_k) v = self.v_linear(v).view(bs, -1, self.h, self.d_k) k = k.transpose(1, 2) q = q.transpose(1, 2) v = v.transpose(1, 2) att_output, att_weights = attention(q, k, v, self.d_k, mask, self.dropout) att_weights = att_weights.detach()[:, -2:].sum(dim=1) / 2 concat = att_output.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat) return output, att_weights class DecoderLayerNew(nn.Module): def __init__(self, d_model, heads, dropout=0.1): super().__init__() self.norm_1 = nn.LayerNorm(d_model) self.norm_2 = nn.LayerNorm(d_model) self.dropout_1 = nn.Dropout(dropout) self.dropout_2 = nn.Dropout(dropout) self.attn_1 = MultiHeadAttention(heads, d_model, dropout=dropout) self.ff = FeedForward(d_model) def forward(self, input_0): primals_1 = self.norm_1.weight primals_2 = self.norm_1.bias primals_3 = input_0 primals_4 = self.attn_1.k_linear.weight primals_5 = self.attn_1.k_linear.bias primals_6 = self.attn_1.q_linear.weight primals_7 = self.attn_1.q_linear.bias primals_8 = self.attn_1.v_linear.weight primals_9 = self.attn_1.v_linear.bias primals_10 = self.attn_1.out.weight primals_11 = self.attn_1.out.bias primals_12 = self.norm_2.weight primals_13 = self.norm_2.bias primals_14 = self.ff.linear_1.weight primals_15 = self.ff.linear_1.bias primals_16 = self.ff.linear_2.weight primals_17 = self.ff.linear_2.bias output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0], output[1]
davide-belli/generative-graph-transformer
DecoderLayer
false
15,166
[ "MIT" ]
51
949aacf57246e8c28df7dfa38e5c59bf8b2b0ee8
https://github.com/davide-belli/generative-graph-transformer/tree/949aacf57246e8c28df7dfa38e5c59bf8b2b0ee8
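Editorial aside (hedged, not part of the dataset row): the primals mapping in DecoderLayerNew above follows the shape and usage pattern in call(), where primals_3 is the residual input, primals_4/primals_6/primals_8 feed the k/q/v projections (buf3, buf4, buf5), primals_10/primals_11 the output projection, and primals_12/primals_13 the second layer norm. A quick way to sanity-check such a lowering is to load identical weights into the eager and compiled modules and compare outputs. The harness below is a sketch that assumes a CUDA device and that DecoderLayer and DecoderLayerNew from the entry above are in scope.

import torch

def check_decoder_parity(d_model=4, heads=4, atol=1e-5):
    torch.manual_seed(0)
    eager = DecoderLayer(d_model, heads).cuda().eval()     # reference module
    fused = DecoderLayerNew(d_model, heads).cuda().eval()  # Inductor-lowered twin
    fused.load_state_dict(eager.state_dict())              # identical parameters
    x = torch.rand(4, d_model, device='cuda')
    with torch.no_grad():
        y_ref, _ = eager(x)
        y_new, _ = fused(x)
    return torch.allclose(y_ref, y_new, atol=atol)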
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zb/czbtvtaquu6hd4qsgmoykgrs64gmpitzjrr366bcqkjsx3el44xo.py # Topologically Sorted Source Nodes: [sub, add, sqrt, x_1, mul, x_2], Original ATen: [aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mul => mul # sqrt => sqrt # sub => sub # x_1 => div # x_2 => add_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %unsqueeze), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_1, 1e-06), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %div), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_1), kwargs = {}) triton_poi_fused_add_div_mul_sqrt_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sqrt_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = 1e-06 tmp26 = tmp24 + tmp25 tmp27 = libdevice.sqrt(tmp26) tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, add, sqrt, x_1, mul, x_2], Original ATen: [aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_sqrt_sub_0.run(primals_2, primals_1, primals_3, buf0, 16, grid=grid(16), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch as th import torch.nn as nn from torch.nn import Parameter class LayerNorm(nn.Module): """ Layer Normalization based on Ba & al.: 'Layer Normalization' https://arxiv.org/pdf/1607.06450.pdf """ def __init__(self, input_size: 'int', learnable: 'bool'=True, epsilon: 'float'=1e-06): super(LayerNorm, self).__init__() self.input_size = input_size self.learnable = learnable self.alpha = th.empty(1, input_size).fill_(0) self.beta = th.empty(1, input_size).fill_(0) self.epsilon = epsilon if learnable: self.alpha = Parameter(self.alpha) self.beta = Parameter(self.beta) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.input_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x: 'th.Tensor') ->th.Tensor: size = x.size() x = x.view(x.size(0), -1) x = (x - th.mean(x, 1).unsqueeze(1)) / th.sqrt(th.var(x, 1). unsqueeze(1) + self.epsilon) if self.learnable: x = self.alpha.expand_as(x) * x + self.beta.expand_as(x) return x.view(size) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
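Editorial gloss (a sketch, not part of the row): the forward above normalizes each row with the unbiased variance (th.var's default), which is why the fused kernel below divides the sum of squares by 3.0 for 4 features, then adds epsilon inside the square root. A minimal eager reference:

import torch

def layer_norm_reference(x, alpha, beta, epsilon=1e-06):
    mean = x.mean(dim=1, keepdim=True)
    var = x.var(dim=1, unbiased=True, keepdim=True)  # sum of squares / (N - 1), the /3.0 in the kernel
    return alpha * ((x - mean) / torch.sqrt(var + epsilon)) + beta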
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch as th import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_sqrt_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = 1e-06 tmp26 = tmp24 + tmp25 tmp27 = libdevice.sqrt(tmp26) tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_sqrt_sub_0[grid(16)](primals_2, primals_1, primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class LayerNormNew(nn.Module): """ Layer Normalization based on Ba & al.: 'Layer Normalization' https://arxiv.org/pdf/1607.06450.pdf """ def __init__(self, input_size: 'int', learnable: 'bool'=True, epsilon: 'float'=1e-06): super(LayerNormNew, self).__init__() self.input_size = input_size self.learnable = learnable self.alpha = th.empty(1, input_size).fill_(0) self.beta = th.empty(1, input_size).fill_(0) self.epsilon = epsilon if learnable: self.alpha = Parameter(self.alpha) self.beta = Parameter(self.beta) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.input_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, input_0): primals_2 = self.alpha primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
denizetkar/lstms.pth
LayerNorm
false
15,167
[ "Apache-2.0" ]
130
c1d6af1e106e17c51604ae8acdb5114828adff19
https://github.com/denizetkar/lstms.pth/tree/c1d6af1e106e17c51604ae8acdb5114828adff19
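One more hedged sketch before the next entry: the fused kernel can also be launched directly with the same argument order call() uses (alpha, x, beta, out). The shapes, strides, and device below are assumptions matching the asserts above, and the kernel plus the grid helper are assumed to be in scope from the entry.

import torch

x = torch.rand(4, 4, device='cuda')
alpha = torch.rand(1, 4, device='cuda')
beta = torch.rand(1, 4, device='cuda')
out = torch.empty_strided((4, 4), (4, 1), device='cuda')
# Same launch configuration as in call(): one block covering all 16 elements.
triton_poi_fused_add_div_mul_sqrt_sub_0[grid(16)](alpha, x, beta, out,
    16, XBLOCK=16, num_warps=1, num_stages=1)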
BaLayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nh/cnh2aw4nwmnig5gt5lckftrhd3hv27p53zaaoepmddp2y5zppfn3.py # Topologically Sorted Source Nodes: [center], Original ATen: [aten.sub] # Source node to ATen node mapping: # center => sub # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %unsqueeze), kwargs = {}) triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sz/cszfp3x3pks3fytthif3lw3uiwokhqam3le3qlf4t44wbcanqoi3.py # Topologically Sorted Source Nodes: [add, output, mul, output_1], Original ATen: [aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mul => mul # output => div # output_1 => add_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_1, 1e-05), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_div_mul_1 = async_compile.triton('triton_poi_fused_add_div_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = 1e-05 tmp17 = tmp15 + 
tmp16 tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [center], Original ATen: [aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_sub_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, output, mul, output_1], Original ATen: [aten.add, aten.div, aten.mul] triton_poi_fused_add_div_mul_1.run(primals_2, buf0, primals_3, buf1, 16, grid=grid(16), stream=stream0) del buf0 del primals_2 del primals_3 return (buf1, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch as th import torch.nn as nn from torch.nn import Parameter class BaLayerNorm(nn.Module): """ Layer Normalization based on Ba & al.: 'Layer Normalization' https://arxiv.org/pdf/1607.06450.pdf This implementation mimicks the original torch implementation at: https://github.com/ryankiros/layer-norm/blob/master/torch_modules/LayerNormalization.lua """ def __init__(self, input_size: 'int', learnable: 'bool'=True, epsilon: 'float'=1e-05): super(BaLayerNorm, self).__init__() self.input_size = input_size self.learnable = learnable self.epsilon = epsilon self.alpha = th.empty(1, input_size).fill_(0) self.beta = th.empty(1, input_size).fill_(0) if learnable: self.alpha = Parameter(self.alpha) self.beta = Parameter(self.beta) def forward(self, x: 'th.Tensor') ->th.Tensor: size = x.size() x = x.view(x.size(0), -1) mean = th.mean(x, 1).unsqueeze(1) center = x - mean std = th.sqrt(th.mean(th.square(center), 1)).unsqueeze(1) output = center / (std + self.epsilon) if self.learnable: output = self.alpha * output + self.beta return output.view(size) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
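Editorial reference (a sketch, not part of the row): unlike the LayerNorm entry above, this variant uses the biased (population) standard deviation and adds epsilon to the std itself rather than to the variance, which the two fused kernels below reproduce (mean of squared centers via the /4.0, then sqrt, then + 1e-05). In eager PyTorch:

import torch

def ba_layer_norm_reference(x, alpha, beta, epsilon=1e-05):
    center = x - x.mean(dim=1, keepdim=True)
    std = center.pow(2).mean(dim=1, keepdim=True).sqrt()  # biased std, the /4.0 in the kernel
    return alpha * (center / (std + epsilon)) + beta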
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch as th import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_div_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_div_mul_1[grid(16)](primals_2, buf0, primals_3, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del primals_2 del primals_3 return buf1, primals_1 class BaLayerNormNew(nn.Module): """ Layer Normalization based on Ba & al.: 'Layer Normalization' https://arxiv.org/pdf/1607.06450.pdf This implementation mimicks the original torch implementation at: https://github.com/ryankiros/layer-norm/blob/master/torch_modules/LayerNormalization.lua """ def __init__(self, input_size: 'int', learnable: 'bool'=True, epsilon: 'float'=1e-05): super(BaLayerNormNew, self).__init__() self.input_size = input_size self.learnable = learnable self.epsilon = epsilon self.alpha = th.empty(1, input_size).fill_(0) self.beta = th.empty(1, input_size).fill_(0) if learnable: self.alpha = Parameter(self.alpha) self.beta = 
Parameter(self.beta) def forward(self, input_0): primals_2 = self.alpha primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
denizetkar/lstms.pth
BaLayerNorm
false
15,168
[ "Apache-2.0" ]
130
c1d6af1e106e17c51604ae8acdb5114828adff19
https://github.com/denizetkar/lstms.pth/tree/c1d6af1e106e17c51604ae8acdb5114828adff19
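A short hedged contrast of the two normalizations collected so far, using the reference sketches introduced above (assumptions, not dataset content): at N=4 features the unbiased std is sqrt(4/3) times the biased one, so the two outputs differ by roughly a factor of sqrt(3)/2 before the epsilon terms.

import torch

x = torch.rand(4, 4)
alpha, beta = torch.ones(1, 4), torch.zeros(1, 4)
print(layer_norm_reference(x, alpha, beta))     # unbiased variance, eps inside the sqrt
print(ba_layer_norm_reference(x, alpha, beta))  # biased std, eps added to the std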
GatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ov/covk2vkuzvrnevl7yr4q6iosnxzhisgmyrsakfuuvnmbfu3gjixz.py # Topologically Sorted Source Nodes: [conv2d, h, conv2d_1, g, mul], Original ATen: [aten.convolution, aten.relu, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # conv2d => convolution # conv2d_1 => convolution_1 # g => sigmoid # h => relu # mul => mul # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %sigmoid), kwargs = {}) triton_poi_fused_convolution_mul_relu_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_mul_relu_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_relu_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_mul_relu_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 81) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp2) tmp8 = tl.sigmoid(tmp5) tmp9 = tmp7 * tmp8 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(in_out_ptr1 + (x3), tmp5, xmask) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1)) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1)) buf1 = buf0; del buf0 # reuse buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, h, conv2d_1, g, mul], Original ATen: [aten.convolution, aten.relu, aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_convolution_mul_relu_sigmoid_0.run(buf1, buf3, primals_2, primals_5, buf4, 1296, grid=grid(1296), stream=stream0) del primals_2 del primals_5 return (buf4, primals_1, primals_3, primals_4, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return 
print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GatedConv2d(nn.Module): def __init__(self, in_c, out_c, kernel, stride, pad, dilation=1, act= torch.relu): super(GatedConv2d, self).__init__() self.activation = act self.sigmoid = nn.Sigmoid() self.h = nn.Conv2d(in_c, out_c, kernel, stride, pad, dilation) self.g = nn.Conv2d(in_c, out_c, kernel, stride, pad, dilation) def forward(self, x): h = self.activation(self.h(x)) g = self.sigmoid(self.g(x)) return h * g def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_c': 4, 'out_c': 4, 'kernel': 4, 'stride': 1, 'pad': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_mul_relu_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 81 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp2) tmp8 = tl.sigmoid(tmp5) tmp9 = tmp7 * tmp8 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(in_out_ptr1 + x3, tmp5, xmask) tl.store(out_ptr0 + x3, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1)) buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1)) buf1 = buf0 del buf0 buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_mul_relu_sigmoid_0[grid(1296)](buf1, buf3, primals_2, primals_5, buf4, 1296, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_5 return buf4, primals_1, primals_3, primals_4, buf1, buf3 class GatedConv2dNew(nn.Module): def __init__(self, in_c, out_c, kernel, stride, pad, dilation=1, act= torch.relu): super(GatedConv2dNew, self).__init__() self.activation = act self.sigmoid = nn.Sigmoid() self.h = nn.Conv2d(in_c, out_c, kernel, stride, pad, dilation) self.g = nn.Conv2d(in_c, out_c, kernel, stride, pad, dilation) def forward(self, input_0): primals_1 = self.h.weight primals_2 = self.h.bias primals_4 = self.g.weight primals_5 = self.g.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
dendisuhubdy/flow_synthesizer
GatedConv2d
false
15169
[ "MIT" ]
93
1561e8ce2520258acb3d228beebbb626a8abc04f
https://github.com/dendisuhubdy/flow_synthesizer/tree/1561e8ce2520258acb3d228beebbb626a8abc04f
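A quick way to sanity-check this record is to load the eager GatedConv2d and the compiled GatedConv2dNew with the same weights and compare outputs; with a 4x4 kernel and padding 4 both produce a (4, 4, 9, 9) tensor, since (4 + 2*4 - 4) + 1 = 9. This is a minimal sketch, assuming a CUDA device and the class definitions from this record in scope; check_gated_conv2d is a hypothetical helper name.

import torch

def check_gated_conv2d():
    torch.manual_seed(0)
    eager = GatedConv2d(in_c=4, out_c=4, kernel=4, stride=1, pad=4).cuda()
    compiled = GatedConv2dNew(in_c=4, out_c=4, kernel=4, stride=1, pad=4).cuda()
    # Share weights so both modules compute relu(conv_h(x)) * sigmoid(conv_g(x))
    # over identical parameters.
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)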
MinibatchStddev
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ch/cch6k75dxab6zgfefoetubcglkqoofyirbpzbtjxg7stah6ekidu.py # Topologically Sorted Source Nodes: [mean, sub, pow_1, mean_1, h], Original ATen: [aten.mean, aten.sub, aten.pow, aten.cat] # Source node to ATen node mapping: # h => cat # mean => mean # mean_1 => mean_1 # pow_1 => pow_1 # sub => sub # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %expand], 1), kwargs = {}) triton_per_fused_cat_mean_pow_sub_0 = async_compile.triton('triton_per_fused_cat_mean_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_mean_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_cat_mean_pow_sub_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 64 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 256.0 tmp5 = tmp3 / tmp4 tmp6 = tmp0 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tl.store(out_ptr0 + (tl.broadcast_to(r1 + (80*r2), [RBLOCK])), tmp0, None) tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tp/ctpdg2ecscl5t55pjqgwqedimjrcbfrozz5nlbdl7n7mqsczkcqy.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat] # Source node to ATen node mapping: # h => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %expand], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = 256.0 tmp3 = tmp1 / tmp2 tmp4 = 1e-08 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tl.store(out_ptr0 + (x0 + (80*x1)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse buf4 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 4, 4, 4), (80, 16, 4, 1), 0) # alias # Topologically Sorted Source Nodes: [mean, sub, pow_1, mean_1, h], Original ATen: [aten.mean, aten.sub, aten.pow, aten.cat] stream0 = get_raw_stream(0) triton_per_fused_cat_mean_pow_sub_0.run(buf1, arg0_1, buf2, 1, 256, grid=grid(1), stream=stream0) del arg0_1 buf3 = reinterpret_tensor(buf4, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias # Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf1, buf3, 64, grid=grid(64), stream=stream0) del buf1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn def Tstdeps(val): return torch.sqrt(((val - val.mean()) ** 2).mean() + 1e-08) class MinibatchStddev(nn.Module): def __init__(self): super(MinibatchStddev, self).__init__() self.eps = 1.0 def forward(self, x): stddev_mean = Tstdeps(x) new_channel = stddev_mean.expand(x.size(0), 1, x.size(2), x.size(3)) h = torch.cat((x, new_channel), dim=1) return h def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_cat_mean_pow_sub_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 64 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 256.0 tmp5 = tmp3 / tmp4 tmp6 = tmp0 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tl.store(out_ptr0 + tl.broadcast_to(r1 + 80 * r2, [RBLOCK]), tmp0, None) tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = 256.0 tmp3 = tmp1 / tmp2 tmp4 = 1e-08 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tl.store(out_ptr0 + (x0 + 80 * x1), tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 buf4 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 4, 4, 4), (80, 16, 4, 1), 0) get_raw_stream(0) triton_per_fused_cat_mean_pow_sub_0[grid(1)](buf1, arg0_1, buf2, 1, 256, num_warps=2, num_stages=1) del arg0_1 buf3 = reinterpret_tensor(buf4, (4, 1, 4, 4), (80, 16, 4, 1), 64) triton_poi_fused_cat_1[grid(64)](buf1, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 return buf4, def Tstdeps(val): return torch.sqrt(((val - val.mean()) ** 2).mean() + 1e-08) class MinibatchStddevNew(nn.Module): def __init__(self): super(MinibatchStddevNew, self).__init__() self.eps = 1.0 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
deepsound-project/pggan-pytorch
MinibatchStddev
false
15170
[ "MIT" ]
115
dab2ec79229c3800253a209304dbb1e7ac1d1219
https://github.com/deepsound-project/pggan-pytorch/tree/dab2ec79229c3800253a209304dbb1e7ac1d1219
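The two kernels in this record split the work: the persistent reduction computes one global variance over all 256 input elements while copying x into the first four output channels, and the small pointwise kernel fills the fifth channel with sqrt(var + 1e-08). A minimal equivalence sketch, assuming CUDA and the definitions above; check_minibatch_stddev is a hypothetical name.

import torch

def check_minibatch_stddev():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out_eager = MinibatchStddev()(x)
    out_compiled = MinibatchStddevNew()(x)
    assert out_eager.shape == (4, 5, 4, 4)  # one extra stddev channel
    torch.testing.assert_close(out_eager, out_compiled)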
ChanNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ld/cldjop57q475tbpoqcv576vjehue6t4nclhegc4tag6qhroxiw2u.py # Topologically Sorted Source Nodes: [var, std, mean, sub, add, truediv, mul, add_1], Original ATen: [aten.var, aten.sqrt, aten.mean, aten.sub, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mean => mean # mul => mul # std => sqrt # sub => sub # truediv => div # var => var # Graph fragment: # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-05), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp21 / tmp8 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = tmp10 / tmp25 tmp28 = tmp26 * tmp27 tmp30 = tmp28 + tmp29 tl.store(out_ptr0 + (x3), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [var, std, mean, sub, add, truediv, mul, add_1], Original ATen: [aten.var, aten.sqrt, aten.mean, aten.sub, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class ChanNorm(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): std = torch.var(x, dim=1, unbiased=False, keepdim=True).sqrt() mean = torch.mean(x, dim=1, keepdim=True) return (x - mean) / (std + self.eps) * self.g + self.b def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp21 / tmp8 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = tmp10 / tmp25 tmp28 = tmp26 * tmp27 tmp30 = tmp28 + tmp29 tl.store(out_ptr0 + x3, tmp30, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class ChanNormNew(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, input_0): primals_2 = self.g primals_3 = self.b primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
destefani/lightweight-gan
ChanNorm
false
15171
[ "MIT" ]
1187
5ba61c21c8c9c8d4574a4a3ddd4759f86debf9bf
https://github.com/destefani/lightweight-gan/tree/5ba61c21c8c9c8d4574a4a3ddd4759f86debf9bf
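Because every load in the fused kernel is unrolled over exactly four channels, this record is specialized to dim=4. Below is a minimal parity sketch under the same assumptions as the earlier checks (CUDA, classes from this record in scope); note that the eager formula adds eps to the standard deviation rather than the variance, so it is not interchangeable with nn.GroupNorm.

import torch

def check_chan_norm():
    torch.manual_seed(0)
    eager = ChanNorm(dim=4).cuda()
    compiled = ChanNormNew(dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # copies g and b
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-5, atol=1e-6)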
GatedDense
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/oo/coozqk6bqctk7fxhlwoitvlz2474hersjipphjkqeljrkanhrlgk.py # Topologically Sorted Source Nodes: [h_1, g, mul], Original ATen: [aten.relu, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # g => sigmoid # h_1 => relu # mul => mul # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %sigmoid), kwargs = {}) triton_poi_fused_mul_relu_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_relu_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_relu_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tl.sigmoid(tmp3) tmp5 = tmp2 * tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_1, g, mul], Original ATen: [aten.relu, aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_relu_sigmoid_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GatedDense(nn.Module): def __init__(self, input_size, output_size, activation=torch.relu): super(GatedDense, self).__init__() self.activation = activation self.sigmoid = nn.Sigmoid() self.h = nn.Linear(input_size, output_size) self.g = nn.Linear(input_size, output_size) def forward(self, x): h = self.h(x) if self.activation is not None: h = self.activation(h) g = self.sigmoid(self.g(x)) return h * g def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_relu_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tl.sigmoid(tmp3) tmp5 = tmp2 * tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_relu_sigmoid_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1 class GatedDenseNew(nn.Module): def __init__(self, input_size, output_size, activation=torch.relu): super(GatedDenseNew, self).__init__() self.activation = activation self.sigmoid = nn.Sigmoid() self.h = nn.Linear(input_size, output_size) self.g = nn.Linear(input_size, output_size) def forward(self, input_0): primals_1 = self.h.weight primals_2 = self.h.bias primals_4 = self.g.weight primals_5 = self.g.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
dendisuhubdy/flow_synthesizer
GatedDense
false
15172
[ "MIT" ]
93
1561e8ce2520258acb3d228beebbb626a8abc04f
https://github.com/dendisuhubdy/flow_synthesizer/tree/1561e8ce2520258acb3d228beebbb626a8abc04f
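In the compiled path the two addmm calls produce the h and g pre-activations and the Triton kernel fuses relu, sigmoid, and the elementwise product, matching relu(h(x)) * sigmoid(g(x)) from the eager forward. A minimal parity sketch, assuming CUDA and the definitions in this record; check_gated_dense is a hypothetical name.

import torch

def check_gated_dense():
    torch.manual_seed(0)
    eager = GatedDense(input_size=4, output_size=4).cuda()
    compiled = GatedDenseNew(input_size=4, output_size=4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-5, atol=1e-6)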
LinearBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, 
primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from functools import partial import torch.nn as nn def dispatcher(dispatch_fn): def decorated(key, *args): if callable(key): return key if key is None: key = 'none' return dispatch_fn(key, *args) return decorated @dispatcher def activ_dispatch(activ): return {'none': nn.Identity, 'relu': nn.ReLU, 'lrelu': partial(nn. LeakyReLU, negative_slope=0.2)}[activ.lower()] def spectral_norm(module): """ init & apply spectral norm """ nn.init.xavier_uniform_(module.weight, 2 ** 0.5) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() return nn.utils.spectral_norm(module) @dispatcher def w_norm_dispatch(w_norm): return {'spectral': spectral_norm, 'none': lambda x: x}[w_norm.lower()] class LinearBlock(nn.Module): """Pre-activate linear block""" def __init__(self, C_in, C_out, norm='none', activ='relu', bias=True, w_norm='none', dropout=0.0): super().__init__() activ = activ_dispatch(activ) if norm.lower() == 'bn': norm = nn.BatchNorm1d elif norm.lower() == 'none': norm = nn.Identity else: raise ValueError( f'LinearBlock supports BN only (but {norm} is given)') w_norm = w_norm_dispatch(w_norm) self.norm = norm(C_in) self.activ = activ() if dropout > 0.0: self.dropout = nn.Dropout(p=dropout) self.linear = w_norm(nn.Linear(C_in, C_out, bias)) def forward(self, x): x = self.norm(x) x = self.activ(x) if hasattr(self, 'dropout'): x = self.dropout(x) return self.linear(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'C_in': 4, 'C_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from functools import partial import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (64, 4), (4, 1), 0) def dispatcher(dispatch_fn): def decorated(key, *args): if callable(key): return key if key is None: key = 'none' return dispatch_fn(key, *args) return decorated @dispatcher def activ_dispatch(activ): return {'none': nn.Identity, 'relu': nn.ReLU, 'lrelu': partial(nn. LeakyReLU, negative_slope=0.2)}[activ.lower()] def spectral_norm(module): """ init & apply spectral norm """ nn.init.xavier_uniform_(module.weight, 2 ** 0.5) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() return nn.utils.spectral_norm(module) @dispatcher def w_norm_dispatch(w_norm): return {'spectral': spectral_norm, 'none': lambda x: x}[w_norm.lower()] class LinearBlockNew(nn.Module): """Pre-activate linear block""" def __init__(self, C_in, C_out, norm='none', activ='relu', bias=True, w_norm='none', dropout=0.0): super().__init__() activ = activ_dispatch(activ) if norm.lower() == 'bn': norm = nn.BatchNorm1d elif norm.lower() == 'none': norm = nn.Identity else: raise ValueError( f'LinearBlock supports BN only (but {norm} is given)') w_norm = w_norm_dispatch(w_norm) self.norm = norm(C_in) self.activ = activ() if dropout > 0.0: self.dropout = nn.Dropout(p=dropout) self.linear = w_norm(nn.Linear(C_in, C_out, bias)) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
derwind/dmfont
LinearBlock
false
15173
[ "MIT" ]
95
17a91a9cc1917d2485eaa8e92b68245578920c76
https://github.com/derwind/dmfont/tree/17a91a9cc1917d2485eaa8e92b68245578920c76
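The dispatchers in this record accept either a string key or a callable, so the same constructor builds plain and spectral-normalized variants; nn.Linear applies over the last dimension, which is why a (4, 4, 4, 4) input passes straight through. A small usage sketch with the eager class above (CPU is fine; linear_block_demo is a hypothetical name).

import torch

def linear_block_demo():
    plain = LinearBlock(C_in=4, C_out=4)  # Identity norm, ReLU activation
    sn = LinearBlock(C_in=4, C_out=4, w_norm='spectral')  # spectral-normalized weight
    x = torch.rand(4, 4, 4, 4)
    assert plain(x).shape == (4, 4, 4, 4)
    assert sn(x).shape == (4, 4, 4, 4)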
PopulationColourRGBTransforms
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3i/c3isf4ke3rahvwor7smmcgchuksnbyt3bx5jdpztvbwijjqerzt5.py # Topologically Sorted Source Nodes: [colours], Original ATen: [aten.cat] # Source node to ATen node mapping: # colours => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%clamp_max, %clamp_max_1, %clamp_max_2, %primals_5, %clamp_max_3], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 5 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp16 = tl.load(in_ptr1 + (0)) 
tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp26 = tl.load(in_ptr2 + (0)) tmp27 = tl.broadcast_to(tmp26, [XBLOCK]) tmp36 = tl.load(in_ptr3 + (0)) tmp37 = tl.broadcast_to(tmp36, [XBLOCK]) tmp41 = tl.load(in_ptr4 + (0)) tmp42 = tl.broadcast_to(tmp41, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = 4.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = triton_helpers.minimum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 2, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tmp12 & tmp14 tmp18 = triton_helpers.maximum(tmp17, tmp7) tmp19 = triton_helpers.minimum(tmp18, tmp7) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tmp0 >= tmp13 tmp23 = tl.full([1], 3, tl.int64) tmp24 = tmp0 < tmp23 tmp25 = tmp22 & tmp24 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = triton_helpers.minimum(tmp28, tmp7) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp25, tmp29, tmp30) tmp32 = tmp0 >= tmp23 tmp33 = tl.full([1], 4, tl.int64) tmp34 = tmp0 < tmp33 tmp35 = tmp32 & tmp34 tmp38 = tmp0 >= tmp33 tmp39 = tl.full([1], 5, tl.int64) tmp40 = tmp0 < tmp39 tmp43 = 0.0 tmp44 = triton_helpers.maximum(tmp42, tmp43) tmp45 = 1.0 tmp46 = triton_helpers.minimum(tmp44, tmp45) tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype) tmp48 = tl.where(tmp38, tmp46, tmp47) tmp49 = tl.where(tmp35, tmp37, tmp48) tmp50 = tl.where(tmp25, tmp31, tmp49) tmp51 = tl.where(tmp15, tmp21, tmp50) tmp52 = tl.where(tmp4, tmp11, tmp51) tl.store(out_ptr0 + (x0), tmp52, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tk/ctklu7t74uopguzhx7pa32u4nabukme7bhcngvxjsuhvqkydni7u.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cat, %primals_6), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, 
in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bj/cbjynfyokuqclbxjmxut6f7u3t3ll57y6okui2tpwi4mr2auvdsd.py # Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp] # Source node to ATen node mapping: # clamp => clamp_max, clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%primals_1, 4), kwargs = {}) # %clamp_max : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 4), kwargs = {}) triton_poi_fused_clamp_2 = async_compile.triton('triton_poi_fused_clamp_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = 4.0 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = triton_helpers.minimum(tmp3, tmp2) tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2v/c2vgtvaym45qwxivowsz56rkt3ads3abj4fug33bfefg6yqbatw7.py # Topologically Sorted Source Nodes: [clamp_3], Original ATen: [aten.clamp] # Source node to ATen node mapping: # clamp_3 => clamp_max_3, clamp_min_3 # Graph fragment: # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%primals_4, 0.0), kwargs = {}) # %clamp_max_3 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {}) triton_poi_fused_clamp_3 = async_compile.triton('triton_poi_fused_clamp_3', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = 0.0 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = 1.0 tmp5 = triton_helpers.minimum(tmp3, tmp4) tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp5, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_2, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_3, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_4, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_5, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 5, 1, 1), (5, 5, 1, 5, 5), torch.float32) # Topologically Sorted Source Nodes: [colours], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, primals_5, primals_4, buf0, 5, grid=grid(5), stream=stream0) del primals_5 buf1 = empty_strided_cuda((1, 1, 5, 4, 4), (80, 80, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(buf0, primals_6, buf1, 80, grid=grid(80), stream=stream0) buf2 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp] triton_poi_fused_clamp_2.run(primals_1, buf2, 1, grid=grid(1), stream=stream0) # Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp] buf3 = torch.ops.aten.set_.source_Tensor(primals_1, buf2) assert_size_stride(buf3, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) del primals_1 buf7 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [clamp_1], Original ATen: [aten.clamp] 
triton_poi_fused_clamp_2.run(primals_2, buf7, 1, grid=grid(1), stream=stream0) # Topologically Sorted Source Nodes: [clamp_1], Original ATen: [aten.clamp] buf8 = torch.ops.aten.set_.source_Tensor(primals_2, buf7) assert_size_stride(buf8, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) del primals_2 buf12 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [clamp_2], Original ATen: [aten.clamp] triton_poi_fused_clamp_2.run(primals_3, buf12, 1, grid=grid(1), stream=stream0) # Topologically Sorted Source Nodes: [clamp_2], Original ATen: [aten.clamp] buf13 = torch.ops.aten.set_.source_Tensor(primals_3, buf12) assert_size_stride(buf13, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) del primals_3 buf17 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [clamp_3], Original ATen: [aten.clamp] triton_poi_fused_clamp_3.run(primals_4, buf17, 1, grid=grid(1), stream=stream0) # Topologically Sorted Source Nodes: [clamp_3], Original ATen: [aten.clamp] buf18 = torch.ops.aten.set_.source_Tensor(primals_4, buf17) assert_size_stride(buf18, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) del buf0 del primals_4 return (buf1, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config
import torch
import numpy as np


class PopulationColourRGBTransforms(torch.nn.Module):
    """RGB color transforms and ordering of patches."""

    def __init__(self, config, device, num_patches=1, pop_size=1,
                 requires_grad=True):
        super(PopulationColourRGBTransforms, self).__init__()
        self.config = config
        self.device = device
        self._pop_size = pop_size
        rgb_init_range = self.config['initial_max_rgb'] - self.config[
            'initial_min_rgb']
        population_reds = np.random.rand(pop_size, num_patches, 1, 1, 1
            ) * rgb_init_range + self.config['initial_min_rgb']
        population_greens = np.random.rand(pop_size, num_patches, 1, 1, 1
            ) * rgb_init_range + self.config['initial_min_rgb']
        population_blues = np.random.rand(pop_size, num_patches, 1, 1, 1
            ) * rgb_init_range + self.config['initial_min_rgb']
        population_zeros = np.ones((pop_size, num_patches, 1, 1, 1))
        population_orders = np.random.rand(pop_size, num_patches, 1, 1, 1)
        self.reds = torch.nn.Parameter(torch.tensor(population_reds,
            dtype=torch.float), requires_grad=requires_grad)
        self.greens = torch.nn.Parameter(torch.tensor(population_greens,
            dtype=torch.float), requires_grad=requires_grad)
        self.blues = torch.nn.Parameter(torch.tensor(population_blues,
            dtype=torch.float), requires_grad=requires_grad)
        self._zeros = torch.nn.Parameter(torch.tensor(population_zeros,
            dtype=torch.float), requires_grad=False)
        self.orders = torch.nn.Parameter(torch.tensor(population_orders,
            dtype=torch.float), requires_grad=requires_grad)

    def _clamp(self):
        self.reds.data = self.reds.data.clamp(min=self.config['min_rgb'],
            max=self.config['max_rgb'])
        self.greens.data = self.greens.data.clamp(min=self.config['min_rgb'],
            max=self.config['max_rgb'])
        self.blues.data = self.blues.data.clamp(min=self.config['min_rgb'],
            max=self.config['max_rgb'])
        self.orders.data = self.orders.data.clamp(min=0.0, max=1.0)

    def copy_and_mutate_s(self, parent, child):
        with torch.no_grad():
            self.reds[child, ...] = self.reds[parent, ...] + self.config[
                'colour_mutation_scale'] * torch.randn(self.reds[child,
                ...].shape)
            self.greens[child, ...] = self.greens[parent, ...] + self.config[
                'colour_mutation_scale'] * torch.randn(self.greens[child,
                ...].shape)
            self.blues[child, ...] = self.blues[parent, ...] + self.config[
                'colour_mutation_scale'] * torch.randn(self.blues[child,
                ...].shape)
            self.orders[child, ...] = self.orders[parent, ...]

    def copy_from(self, other, idx_to, idx_from):
        """Copy parameters from other colour transform, for selected indices."""
        assert idx_to < self._pop_size
        with torch.no_grad():
            self.reds[idx_to, ...] = other.reds[idx_from, ...]
            self.greens[idx_to, ...] = other.greens[idx_from, ...]
            self.blues[idx_to, ...] = other.blues[idx_from, ...]
            self.orders[idx_to, ...] = other.orders[idx_from, ...]

    def forward(self, x):
        self._clamp()
        colours = torch.cat([self.reds, self.greens, self.blues,
            self._zeros, self.orders], 2)
        return colours * x

    def tensor_to(self, device):
        # Move every colour parameter onto the requested device.
        self.reds = self.reds.to(device)
        self.greens = self.greens.to(device)
        self.blues = self.blues.to(device)
        self.orders = self.orders.to(device)
        self._zeros = self._zeros.to(device)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(initial_max_rgb=4,
        initial_min_rgb=4, min_rgb=4, max_rgb=4), 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 5 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp16 = tl.load(in_ptr1 + 0) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp26 = tl.load(in_ptr2 + 0) tmp27 = tl.broadcast_to(tmp26, [XBLOCK]) tmp36 = tl.load(in_ptr3 + 0) tmp37 = tl.broadcast_to(tmp36, [XBLOCK]) tmp41 = tl.load(in_ptr4 + 0) tmp42 = tl.broadcast_to(tmp41, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = 4.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = triton_helpers.minimum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 2, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tmp12 & tmp14 tmp18 = triton_helpers.maximum(tmp17, tmp7) tmp19 = triton_helpers.minimum(tmp18, tmp7) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tmp0 >= tmp13 tmp23 = tl.full([1], 3, tl.int64) tmp24 = tmp0 < tmp23 tmp25 = tmp22 & tmp24 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = triton_helpers.minimum(tmp28, tmp7) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp25, tmp29, tmp30) tmp32 = tmp0 >= tmp23 tmp33 = tl.full([1], 4, tl.int64) tmp34 = tmp0 < tmp33 tmp35 = tmp32 & tmp34 tmp38 = tmp0 >= tmp33 tl.full([1], 5, tl.int64) tmp43 = 0.0 tmp44 = triton_helpers.maximum(tmp42, tmp43) tmp45 = 1.0 tmp46 = triton_helpers.minimum(tmp44, tmp45) tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype) tmp48 = tl.where(tmp38, tmp46, tmp47) tmp49 = tl.where(tmp35, tmp37, tmp48) tmp50 = tl.where(tmp25, tmp31, tmp49) tmp51 = tl.where(tmp15, tmp21, tmp50) tmp52 = tl.where(tmp4, tmp11, tmp51) tl.store(out_ptr0 + x0, tmp52, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_clamp_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = 4.0 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = triton_helpers.minimum(tmp3, tmp2) tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp4, None) @triton.jit def triton_poi_fused_clamp_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = 0.0 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = 1.0 tmp5 = triton_helpers.minimum(tmp3, tmp4) tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_2, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_3, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_4, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_5, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 5, 1, 1), (5, 5, 1, 5, 5), torch. float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(5)](primals_1, primals_2, primals_3, primals_5, primals_4, buf0, 5, XBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf1 = empty_strided_cuda((1, 1, 5, 4, 4), (80, 80, 16, 4, 1), torch.float32) triton_poi_fused_mul_1[grid(80)](buf0, primals_6, buf1, 80, XBLOCK= 128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch. float32) triton_poi_fused_clamp_2[grid(1)](primals_1, buf2, 1, XBLOCK=1, num_warps=1, num_stages=1) buf3 = torch.ops.aten.set_.source_Tensor(primals_1, buf2) assert_size_stride(buf3, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) del primals_1 buf7 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch. float32) triton_poi_fused_clamp_2[grid(1)](primals_2, buf7, 1, XBLOCK=1, num_warps=1, num_stages=1) buf8 = torch.ops.aten.set_.source_Tensor(primals_2, buf7) assert_size_stride(buf8, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) del primals_2 buf12 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch. float32) triton_poi_fused_clamp_2[grid(1)](primals_3, buf12, 1, XBLOCK=1, num_warps=1, num_stages=1) buf13 = torch.ops.aten.set_.source_Tensor(primals_3, buf12) assert_size_stride(buf13, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1)) del primals_3 buf17 = empty_strided_cuda((1, 1, 1, 1, 1), (1, 1, 1, 1, 1), torch. 
float32)
        triton_poi_fused_clamp_3[grid(1)](primals_4, buf17, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
        buf18 = torch.ops.aten.set_.source_Tensor(primals_4, buf17)
        assert_size_stride(buf18, (1, 1, 1, 1, 1), (1, 1, 1, 1, 1))
        del buf0
        del primals_4
    return buf1, primals_6


class PopulationColourRGBTransformsNew(torch.nn.Module):
    """RGB color transforms and ordering of patches."""

    def __init__(self, config, device, num_patches=1, pop_size=1,
                 requires_grad=True):
        super(PopulationColourRGBTransformsNew, self).__init__()
        self.config = config
        self.device = device
        self._pop_size = pop_size
        rgb_init_range = self.config['initial_max_rgb'] - self.config[
            'initial_min_rgb']
        population_reds = np.random.rand(pop_size, num_patches, 1, 1, 1
            ) * rgb_init_range + self.config['initial_min_rgb']
        population_greens = np.random.rand(pop_size, num_patches, 1, 1, 1
            ) * rgb_init_range + self.config['initial_min_rgb']
        population_blues = np.random.rand(pop_size, num_patches, 1, 1, 1
            ) * rgb_init_range + self.config['initial_min_rgb']
        population_zeros = np.ones((pop_size, num_patches, 1, 1, 1))
        population_orders = np.random.rand(pop_size, num_patches, 1, 1, 1)
        self.reds = torch.nn.Parameter(torch.tensor(population_reds,
            dtype=torch.float), requires_grad=requires_grad)
        self.greens = torch.nn.Parameter(torch.tensor(population_greens,
            dtype=torch.float), requires_grad=requires_grad)
        self.blues = torch.nn.Parameter(torch.tensor(population_blues,
            dtype=torch.float), requires_grad=requires_grad)
        self._zeros = torch.nn.Parameter(torch.tensor(population_zeros,
            dtype=torch.float), requires_grad=False)
        self.orders = torch.nn.Parameter(torch.tensor(population_orders,
            dtype=torch.float), requires_grad=requires_grad)

    def _clamp(self):
        self.reds.data = self.reds.data.clamp(min=self.config['min_rgb'],
            max=self.config['max_rgb'])
        self.greens.data = self.greens.data.clamp(min=self.config['min_rgb'],
            max=self.config['max_rgb'])
        self.blues.data = self.blues.data.clamp(min=self.config['min_rgb'],
            max=self.config['max_rgb'])
        self.orders.data = self.orders.data.clamp(min=0.0, max=1.0)

    def copy_and_mutate_s(self, parent, child):
        with torch.no_grad():
            self.reds[child, ...] = self.reds[parent, ...] + self.config[
                'colour_mutation_scale'] * torch.randn(self.reds[child,
                ...].shape)
            self.greens[child, ...] = self.greens[parent, ...] + self.config[
                'colour_mutation_scale'] * torch.randn(self.greens[child,
                ...].shape)
            self.blues[child, ...] = self.blues[parent, ...] + self.config[
                'colour_mutation_scale'] * torch.randn(self.blues[child,
                ...].shape)
            self.orders[child, ...] = self.orders[parent, ...]

    def copy_from(self, other, idx_to, idx_from):
        """Copy parameters from other colour transform, for selected indices."""
        assert idx_to < self._pop_size
        with torch.no_grad():
            self.reds[idx_to, ...] = other.reds[idx_from, ...]
            self.greens[idx_to, ...] = other.greens[idx_from, ...]
            self.blues[idx_to, ...] = other.blues[idx_from, ...]
            self.orders[idx_to, ...] = other.orders[idx_from, ...]

    def tensor_to(self, device):
        # Move every colour parameter onto the requested device.
        self.reds = self.reds.to(device)
        self.greens = self.greens.to(device)
        self.blues = self.blues.to(device)
        self.orders = self.orders.to(device)
        self._zeros = self._zeros.to(device)

    def forward(self, input_0):
        primals_1 = self.reds
        primals_2 = self.greens
        primals_3 = self.blues
        primals_4 = self._zeros
        primals_5 = self.orders
        primals_6 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
deepmind/arnheim
PopulationColourRGBTransforms
false
15174
[ "Apache-2.0" ]
186
cc9d2dd12391faa460b58bff1cc5be82145a5965
https://github.com/deepmind/arnheim/tree/cc9d2dd12391faa460b58bff1cc5be82145a5965
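The entry above pairs an eager module with its inductor-compiled wrapper. As a quick orientation to the shapes involved, here is a minimal usage sketch; it assumes the PopulationColourRGBTransforms class from this entry is in scope and that a plain dict can stand in for the _mock_config test helper, since the module only reads its config by key (the device argument is stored but unused in forward):

import torch

config = {'initial_max_rgb': 4, 'initial_min_rgb': 4, 'min_rgb': 4, 'max_rgb': 4}
transforms = PopulationColourRGBTransforms(config, device='cpu')
x = torch.rand(4, 4)
out = transforms(x)
# cat([reds, greens, blues, zeros, orders], dim=2) stacks five (1, 1, 1, 1, 1)
# parameters into (1, 1, 5, 1, 1), which broadcasts against the (4, 4) input.
print(out.shape)  # torch.Size([1, 1, 5, 4, 4])

This 5 * 4 * 4 = 80-element broadcast multiply is exactly what triton_poi_fused_mul_1 computes with xnumel = 80 in the compiled wrapper.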
ConvBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/tf/ctfli7vqflj6i63a7y5mg3d6qberai23wf7yzknmpncgz4s6xcpo.py # Topologically Sorted Source Nodes: [x, pad], Original ATen: [aten.relu, aten.constant_pad_nd] # Source node to ATen node mapping: # pad => constant_pad_nd # x => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {}) # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%relu, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_relu_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = 
(xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tl.store(out_ptr0 + (x4), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x, pad], Original ATen: [aten.relu, aten.constant_pad_nd] stream0 = get_raw_stream(0) 
triton_poi_fused_constant_pad_nd_relu_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from functools import partial import torch.nn as nn def dispatcher(dispatch_fn): def decorated(key, *args): if callable(key): return key if key is None: key = 'none' return dispatch_fn(key, *args) return decorated @dispatcher def activ_dispatch(activ): return {'none': nn.Identity, 'relu': nn.ReLU, 'lrelu': partial(nn. LeakyReLU, negative_slope=0.2)}[activ.lower()] @dispatcher def norm_dispatch(norm): return {'none': nn.Identity, 'in': partial(nn.InstanceNorm2d, affine= False), 'bn': nn.BatchNorm2d}[norm.lower()] @dispatcher def pad_dispatch(pad_type): return {'zero': nn.ZeroPad2d, 'replicate': nn.ReplicationPad2d, 'reflect': nn.ReflectionPad2d}[pad_type.lower()] def spectral_norm(module): """ init & apply spectral norm """ nn.init.xavier_uniform_(module.weight, 2 ** 0.5) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() return nn.utils.spectral_norm(module) @dispatcher def w_norm_dispatch(w_norm): return {'spectral': spectral_norm, 'none': lambda x: x}[w_norm.lower()] class ConvBlock(nn.Module): """Pre-activate conv block""" def __init__(self, C_in, C_out, kernel_size=3, stride=1, padding=1, norm='none', activ='relu', bias=True, upsample=False, downsample= False, w_norm='none', pad_type='zero', dropout=0.0): if kernel_size == 1: assert padding == 0 super().__init__() self.C_in = C_in self.C_out = C_out activ = activ_dispatch(activ) norm = norm_dispatch(norm) w_norm = w_norm_dispatch(w_norm) pad = pad_dispatch(pad_type) self.upsample = upsample self.downsample = downsample self.norm = norm(C_in) self.activ = activ() if dropout > 0.0: self.dropout = nn.Dropout2d(p=dropout) self.pad = pad(padding) self.conv = w_norm(nn.Conv2d(C_in, C_out, kernel_size, stride, bias =bias)) def forward(self, x): x = self.norm(x) x = self.activ(x) if self.upsample: x = F.interpolate(x, scale_factor=2) if hasattr(self, 'dropout'): x = self.dropout(x) x = self.conv(self.pad(x)) if self.downsample: x = F.avg_pool2d(x, 2) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'C_in': 4, 'C_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from functools import partial import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tl.store(out_ptr0 + x4, tmp15, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_relu_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 def dispatcher(dispatch_fn): def decorated(key, *args): if callable(key): return key if key is None: key = 'none' return dispatch_fn(key, *args) return decorated @dispatcher def activ_dispatch(activ): return {'none': nn.Identity, 'relu': nn.ReLU, 'lrelu': partial(nn. 
LeakyReLU, negative_slope=0.2)}[activ.lower()] @dispatcher def norm_dispatch(norm): return {'none': nn.Identity, 'in': partial(nn.InstanceNorm2d, affine= False), 'bn': nn.BatchNorm2d}[norm.lower()] @dispatcher def pad_dispatch(pad_type): return {'zero': nn.ZeroPad2d, 'replicate': nn.ReplicationPad2d, 'reflect': nn.ReflectionPad2d}[pad_type.lower()] def spectral_norm(module): """ init & apply spectral norm """ nn.init.xavier_uniform_(module.weight, 2 ** 0.5) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() return nn.utils.spectral_norm(module) @dispatcher def w_norm_dispatch(w_norm): return {'spectral': spectral_norm, 'none': lambda x: x}[w_norm.lower()] class ConvBlockNew(nn.Module): """Pre-activate conv block""" def __init__(self, C_in, C_out, kernel_size=3, stride=1, padding=1, norm='none', activ='relu', bias=True, upsample=False, downsample= False, w_norm='none', pad_type='zero', dropout=0.0): if kernel_size == 1: assert padding == 0 super().__init__() self.C_in = C_in self.C_out = C_out activ = activ_dispatch(activ) norm = norm_dispatch(norm) w_norm = w_norm_dispatch(w_norm) pad = pad_dispatch(pad_type) self.upsample = upsample self.downsample = downsample self.norm = norm(C_in) self.activ = activ() if dropout > 0.0: self.dropout = nn.Dropout2d(p=dropout) self.pad = pad(padding) self.conv = w_norm(nn.Conv2d(C_in, C_out, kernel_size, stride, bias =bias)) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
derwind/dmfont
ConvBlock
false
15175
[ "MIT" ]
95
17a91a9cc1917d2485eaa8e92b68245578920c76
https://github.com/derwind/dmfont/tree/17a91a9cc1917d2485eaa8e92b68245578920c76
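To see what the compiled wrapper in this entry replaces, a minimal parity sketch follows. It assumes a CUDA device (the generated kernels are CUDA-only) and that both ConvBlock and ConvBlockNew from this entry are in scope; nothing in this sketch is part of the original source:

import torch

eager = ConvBlock(C_in=4, C_out=4).cuda()
compiled = ConvBlockNew(C_in=4, C_out=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share the conv weight and bias
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), compiled(x), atol=1e-6))  # expect True

The (4, 4, 6, 6) buffer allocated in call corresponds to the ZeroPad2d(1) step, which triton_poi_fused_constant_pad_nd_relu_0 fuses with the preceding ReLU before handing the padded tensor to the extern convolution.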
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py # Topologically Sorted Source Nodes: [query], Original ATen: [aten.convolution] # Source node to ATen node mapping: # query => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_3, %primals_4, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py # Topologically Sorted Source Nodes: [attn_w], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_w => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_8, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_8, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/li/cliuki7kw34pevna3gnu3ihq5qfn3kvidywzkiifm6ybijryhkq6.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution_3 # Graph fragment: # 
%convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_13, %primals_9, %primals_10, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [query], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 16), (64, 16, 1)) # Topologically Sorted Source Nodes: [key], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1), 0), primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 16), (64, 16, 1)) # 
Topologically Sorted Source Nodes: [value], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1), 0), primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 16), (64, 16, 1)) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [query], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf3, primals_4, 256, grid=grid(256), stream=stream0) del primals_4 buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [key], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf4, primals_6, 256, grid=grid(256), stream=stream0) del primals_6 buf5 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0), buf4, out=buf5) buf8 = empty_strided_cuda((4, 1, 16, 16), (256, 16, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_w], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf5, buf8, 64, 16, grid=grid(64), stream=stream0) del buf5 buf9 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [value], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf9, primals_8, 256, grid=grid(256), stream=stream0) del primals_8 buf10 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (4, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf9, (4, 16, 4), (64, 1, 16), 0), out=buf10) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 1, 16, 4), 0), primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 4, 4), (64, 1, 16, 4)) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf12, primals_10, 256, grid=grid(256), stream=stream0) del primals_10 return (buf12, primals_3, primals_5, primals_7, primals_9, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1), 0), buf8, reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf9, buf3, reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) 
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn def dispatcher(dispatch_fn): def decorated(key, *args): if callable(key): return key if key is None: key = 'none' return dispatch_fn(key, *args) return decorated def spectral_norm(module): """ init & apply spectral norm """ nn.init.xavier_uniform_(module.weight, 2 ** 0.5) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() return nn.utils.spectral_norm(module) @dispatcher def w_norm_dispatch(w_norm): return {'spectral': spectral_norm, 'none': lambda x: x}[w_norm.lower()] def split_dim(x, dim, n_chunks): shape = x.shape assert shape[dim] % n_chunks == 0 return x.view(*shape[:dim], n_chunks, shape[dim] // n_chunks, *shape[ dim + 1:]) class RelativePositionalEmbedding2d(nn.Module): """ Learned relative positional embedding return Q * (R_x + R_y) for input Q and learned embedding R """ def __init__(self, emb_dim, H, W, down_kv=False): super().__init__() self.H = H self.W = W self.down_kv = down_kv self.h_emb = nn.Embedding(H * 2 - 1, emb_dim) self.w_emb = nn.Embedding(W * 2 - 1, emb_dim) rel_y, rel_x = self.rel_grid() self.register_buffer('rel_y', rel_y) self.register_buffer('rel_x', rel_x) def rel_grid(self): y, x = torch.meshgrid(torch.arange(self.H), torch.arange(self.W)) rel_y = y.reshape(1, -1) - y.reshape(-1, 1) rel_x = x.reshape(1, -1) - x.reshape(-1, 1) if self.down_kv: def down(x): n_q, n_k = x.shape x = x.view(n_q, 1, int(n_k ** 0.5), int(n_k ** 0.5)) return (F.avg_pool2d(x.float(), 2) - 0.5).flatten(1).long() rel_y = down(rel_y) rel_x = down(rel_x) rel_y += self.H - 1 rel_x += self.W - 1 return rel_y, rel_x def forward(self, query): """ Args: query: [B, n_heads, C_qk, H*W] return: [B, n_heads, H*W, H*W] """ r_x = self.w_emb(self.rel_x) r_y = self.h_emb(self.rel_y) S_rel = torch.einsum('bhci,ijc->bhij', query, r_x + r_y) return S_rel class Attention(nn.Module): def __init__(self, C_in_q, C_in_kv, C_qk, C_v, w_norm='none', scale= False, n_heads=1, down_kv=False, rel_pos_size=None): """ Args: C_in_q: query source (encoder feature x) C_in_kv: key/value source (decoder feature y) C_qk: inner query/key dim, which should be same C_v: inner value dim, which same as output dim down_kv: Area attention for lightweight self-attention w/ mean pooling. rel_pos_size: height & width for relative positional embedding. If None or 0 is given, do not use relative positional embedding. 
""" super().__init__() self.n_heads = n_heads self.down_kv = down_kv w_norm = w_norm_dispatch(w_norm) self.q_proj = w_norm(nn.Conv1d(C_in_q, C_qk, 1)) self.k_proj = w_norm(nn.Conv1d(C_in_kv, C_qk, 1)) self.v_proj = w_norm(nn.Conv1d(C_in_kv, C_v, 1)) self.out = w_norm(nn.Conv2d(C_v, C_v, 1)) if scale: self.scale = 1.0 / C_qk ** 0.5 if rel_pos_size: C_h_qk = C_qk // n_heads self.rel_pos = RelativePositionalEmbedding2d(C_h_qk, rel_pos_size, rel_pos_size, down_kv=down_kv) def forward(self, x, y): """ Attend from x (decoder) to y (encoder) Args: x: decoder feature y: encoder feature """ B, C, H, W = x.shape flat_x = x.flatten(start_dim=2) if not self.down_kv: flat_y = y.flatten(start_dim=2) else: y_down = F.avg_pool2d(y, 2) flat_y = y_down.flatten(2) query = self.q_proj(flat_x) key = self.k_proj(flat_y) value = self.v_proj(flat_y) query = split_dim(query, 1, self.n_heads) key = split_dim(key, 1, self.n_heads) value = split_dim(value, 1, self.n_heads) attn_score = torch.einsum('bhcq,bhck->bhqk', query, key) if hasattr(self, 'rel_pos'): attn_score += self.rel_pos(query) if hasattr(self, 'scale'): attn_score *= self.scale attn_w = F.softmax(attn_score, dim=-1) attn_out = torch.einsum('bhqk,bhck->bhcq', attn_w, value).reshape(B, C, H, W) out = self.out(attn_out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'C_in_q': 4, 'C_in_kv': 4, 'C_qk': 4, 'C_v': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 16), (64, 16, 1)) buf1 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1), 0), primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 16), (64, 16, 1)) buf2 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1), 0), primals_7, stride=(1,), 
padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 16), (64, 16, 1)) buf3 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf3, primals_4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf4 = buf1 del buf1 triton_poi_fused_convolution_0[grid(256)](buf4, primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0), buf4, out=buf5) buf8 = empty_strided_cuda((4, 1, 16, 16), (256, 16, 16, 1), torch. float32) triton_per_fused__softmax_1[grid(64)](buf5, buf8, 64, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf5 buf9 = buf2 del buf2 triton_poi_fused_convolution_0[grid(256)](buf9, primals_8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 buf10 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (4, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf9, (4, 16, 4), (64, 1, 16), 0), out=buf10) buf11 = extern_kernels.convolution(reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 1, 16, 4), 0), primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 4, 4), (64, 1, 16, 4)) buf12 = buf11 del buf11 triton_poi_fused_convolution_2[grid(256)](buf12, primals_10, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_10 return (buf12, primals_3, primals_5, primals_7, primals_9, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1), 0), buf8, reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf9, buf3, reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0)) def dispatcher(dispatch_fn): def decorated(key, *args): if callable(key): return key if key is None: key = 'none' return dispatch_fn(key, *args) return decorated def spectral_norm(module): """ init & apply spectral norm """ nn.init.xavier_uniform_(module.weight, 2 ** 0.5) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() return nn.utils.spectral_norm(module) @dispatcher def w_norm_dispatch(w_norm): return {'spectral': spectral_norm, 'none': lambda x: x}[w_norm.lower()] def split_dim(x, dim, n_chunks): shape = x.shape assert shape[dim] % n_chunks == 0 return x.view(*shape[:dim], n_chunks, shape[dim] // n_chunks, *shape[ dim + 1:]) class RelativePositionalEmbedding2d(nn.Module): """ Learned relative positional embedding return Q * (R_x + R_y) for input Q and learned embedding R """ def __init__(self, emb_dim, H, W, down_kv=False): super().__init__() self.H = H self.W = W self.down_kv = down_kv self.h_emb = nn.Embedding(H * 2 - 1, emb_dim) self.w_emb = nn.Embedding(W * 2 - 1, emb_dim) rel_y, rel_x = self.rel_grid() self.register_buffer('rel_y', rel_y) self.register_buffer('rel_x', rel_x) def rel_grid(self): y, x = torch.meshgrid(torch.arange(self.H), torch.arange(self.W)) rel_y = y.reshape(1, -1) - y.reshape(-1, 1) rel_x = x.reshape(1, -1) - x.reshape(-1, 1) if self.down_kv: def down(x): n_q, n_k = x.shape x = x.view(n_q, 1, int(n_k ** 0.5), int(n_k ** 0.5)) return (F.avg_pool2d(x.float(), 2) - 0.5).flatten(1).long() rel_y = down(rel_y) rel_x = down(rel_x) rel_y += self.H - 1 rel_x += self.W - 1 return rel_y, rel_x def forward(self, query): """ Args: query: [B, n_heads, C_qk, H*W] return: [B, n_heads, H*W, H*W] """ r_x = self.w_emb(self.rel_x) r_y = 
self.h_emb(self.rel_y) S_rel = torch.einsum('bhci,ijc->bhij', query, r_x + r_y) return S_rel class AttentionNew(nn.Module): def __init__(self, C_in_q, C_in_kv, C_qk, C_v, w_norm='none', scale= False, n_heads=1, down_kv=False, rel_pos_size=None): """ Args: C_in_q: query source (encoder feature x) C_in_kv: key/value source (decoder feature y) C_qk: inner query/key dim, which should be same C_v: inner value dim, which same as output dim down_kv: Area attention for lightweight self-attention w/ mean pooling. rel_pos_size: height & width for relative positional embedding. If None or 0 is given, do not use relative positional embedding. """ super().__init__() self.n_heads = n_heads self.down_kv = down_kv w_norm = w_norm_dispatch(w_norm) self.q_proj = w_norm(nn.Conv1d(C_in_q, C_qk, 1)) self.k_proj = w_norm(nn.Conv1d(C_in_kv, C_qk, 1)) self.v_proj = w_norm(nn.Conv1d(C_in_kv, C_v, 1)) self.out = w_norm(nn.Conv2d(C_v, C_v, 1)) if scale: self.scale = 1.0 / C_qk ** 0.5 if rel_pos_size: C_h_qk = C_qk // n_heads self.rel_pos = RelativePositionalEmbedding2d(C_h_qk, rel_pos_size, rel_pos_size, down_kv=down_kv) def forward(self, input_0, input_1): primals_3 = self.q_proj.weight primals_4 = self.q_proj.bias primals_5 = self.k_proj.weight primals_6 = self.k_proj.bias primals_7 = self.v_proj.weight primals_8 = self.v_proj.bias primals_9 = self.out.weight primals_10 = self.out.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
derwind/dmfont
Attention
false
15,176
[ "MIT" ]
95
17a91a9cc1917d2485eaa8e92b68245578920c76
https://github.com/derwind/dmfont/tree/17a91a9cc1917d2485eaa8e92b68245578920c76
EncoderImageWeightNormPrecomp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/vx/cvxzmthv4i2niuhjkx7pdwegys74ubmwp36fuzpk743r7lkqg4tm.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul] # Source node to ATen node mapping: # _weight_norm => div, mul, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) triton_per_fused_div_mul_norm_0 = async_compile.triton('triton_per_fused_div_mul_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/x2/cx2e5rdeusxxcqfojnklfpvernnjnuzqu3eanmgcyclgrncdxyae.py # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div] # Source node to ATen node mapping: # X => div_1 # norm => add # pow_1 => pow_3 # sqrt => sqrt # sum_1 => sum_2 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-08), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %add), kwargs = {}) triton_poi_fused_add_div_pow_sqrt_sum_1 = async_compile.triton('triton_poi_fused_add_div_pow_sqrt_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_sqrt_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_div_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_div_mul_norm_0.run(buf1, primals_2, primals_1, buf2, 1, 16, grid=grid(1), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [features], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_3 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div] triton_poi_fused_add_div_pow_sqrt_sum_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0) return (buf4, buf2, primals_1, primals_2, buf1, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from collections import OrderedDict import torch.nn as nn import torch.nn.init from torch.nn.utils.weight_norm import weight_norm def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class EncoderImageWeightNormPrecomp(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super(EncoderImageWeightNormPrecomp, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = weight_norm(nn.Linear(img_dim, embed_size), dim=None) def forward(self, images): """Extract image feature vectors.""" features = self.fc(images) if not self.no_imgnorm: features = l2norm(features, dim=-1) return features def load_state_dict(self, state_dict): """Copies parameters, overwriting the default one to accept a state_dict from the full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImageWeightNormPrecomp, self).load_state_dict(new_state) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'img_dim': 4, 'embed_size': 4}]
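A small worked example of l2norm above: after normalizing along dim=-1, every row has unit L2 norm (up to the 1e-08 eps).

import torch

X = torch.tensor([[3.0, 4.0], [0.0, 2.0]])
Y = l2norm(X, dim=-1)
# [3, 4] / 5 -> [0.6, 0.8];  [0, 2] / 2 -> [0.0, 1.0]
print(Y)
print(Y.norm(dim=-1))  # ~tensor([1., 1.])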
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from collections import OrderedDict import torch.nn as nn import torch.nn.init from torch.nn.utils.weight_norm import weight_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) @triton.jit def triton_poi_fused_add_div_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_2, primals_1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_3 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_pow_sqrt_sum_1[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf4, buf2, primals_1, primals_2, buf1, reinterpret_tensor(primals_4 , (64, 4), (4, 1), 0), buf3 def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class 
EncoderImageWeightNormPrecompNew(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super(EncoderImageWeightNormPrecompNew, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = weight_norm(nn.Linear(img_dim, embed_size), dim=None) def load_state_dict(self, state_dict): """Copies parameters, overwriting the default one to accept a state_dict from the full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImageWeightNormPrecompNew, self).load_state_dict(new_state) def forward(self, input_0): primals_3 = self.fc.bias primals_1 = self.fc.weight_g primals_2 = self.fc.weight_v primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
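A hedged parity check between the eager EncoderImageWeightNormPrecomp from the previous listing and the Triton-backed variant above; it assumes a CUDA device and that both classes are importable together.

import torch

if torch.cuda.is_available():
    eager = EncoderImageWeightNormPrecomp(img_dim=4, embed_size=4).cuda()
    fused = EncoderImageWeightNormPrecompNew(img_dim=4, embed_size=4).cuda()
    fused.load_state_dict(eager.state_dict())  # share weight_g / weight_v / bias
    images = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(images), fused(images))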
devilslot/SCAN
EncoderImageWeightNormPrecomp
false
15,177
[ "Apache-2.0" ]
428
01812aa98e2ebe39695c8906589b6fe66b2a0d6e
https://github.com/devilslot/SCAN/tree/01812aa98e2ebe39695c8906589b6fe66b2a0d6e
CopyChannels
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/iw/ciwx6mmmmkga4bsqpmbzvsp2tm5mkfzpf7yt255mstmwpegqy2iw.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = (xindex // 192) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4, 4), (192, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 768, grid=grid(768), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 12, 4, 4), (192, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class CopyChannels(torch.nn.Module): def __init__(self, multiple=3, dim=1): super(CopyChannels, self).__init__() self.multiple = multiple self.dim = dim def forward(self, x): return torch.cat([x for _ in range(self.multiple)], dim=self.dim) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
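Concatenating a tensor with itself `multiple` times along dim=1 is equivalent to repeat(1, multiple, 1, 1); a quick check of the default configuration:

import torch

x = torch.rand(4, 4, 4, 4)
out = CopyChannels()(x)            # multiple=3, dim=1
assert out.shape == (4, 12, 4, 4)  # channel count triples
assert torch.equal(out, x.repeat(1, 3, 1, 1))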
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex // 192 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4, 4), (192, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(768)](arg0_1, buf0, 768, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 12, 4, 4), (192, 16, 4, 1), 0), class CopyChannelsNew(torch.nn.Module): def __init__(self, multiple=3, dim=1): super(CopyChannelsNew, self).__init__() self.multiple = multiple self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
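The generated kernel above does not materialize three separate reads per copy: the load offset x0 + 64 * x2 ignores the copy index, so it fills an expanded (4, 3, 4, 4, 4) buffer and then reinterprets it as (4, 12, 4, 4). A rough eager-mode equivalent of that strategy, for illustration only:

import torch

x = torch.rand(4, 4, 4, 4)
out = x.unsqueeze(1).expand(4, 3, 4, 4, 4).contiguous().view(4, 12, 4, 4)
assert torch.equal(out, torch.cat([x, x, x], dim=1))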
dianjixz/AutoDL
CopyChannels
false
15,178
[ "Apache-2.0" ]
1,044
48db4eb04d55ce69e93d4a3bdc24592bdb34a868
https://github.com/dianjixz/AutoDL/tree/48db4eb04d55ce69e93d4a3bdc24592bdb34a868
CReLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/o3/co3lab6vjzwd5k7mdxlboknauftr7dmlghrc2dujibjcglnlpccf.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %where_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, 
other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 0.01 tmp9 = tmp5 * tmp8 tmp10 = tl.where(tmp7, tmp5, tmp9) tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp4, tmp10, tmp11) tmp13 = tmp0 >= tmp3 tmp14 = tl.full([1], 8, tl.int64) tmp15 = tmp0 < tmp14 tmp16 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp13 & xmask, other=0.0) tmp17 = tmp16 > tmp6 tmp18 = tmp16 * tmp8 tmp19 = tl.where(tmp17, tmp16, tmp18) tmp20 = -tmp19 tmp21 = tmp20 > tmp6 tmp22 = tmp20 * tmp8 tmp23 = tl.where(tmp21, tmp20, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp13, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp12, tmp25) tl.store(out_ptr0 + (x3), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zt/cztrha3eswrw5wsxskayqbrpjfbwic3ptv7qe3d23e7bnagq33hu.py # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # leaky_relu => gt, mul, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.01), kwargs = {}) # %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %mul), kwargs = {}) # %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %where), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr1 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(arg0_1, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class CReLU(nn.Module): def __init__(self): super(CReLU, self).__init__() def forward(self, x): return torch.cat((F.leaky_relu(x, 0.01, inplace=True), F.leaky_relu(-x, 0.01, inplace=True)), 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
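Note that both leaky_relu calls in forward use inplace=True, so the module mutates its input: after the first call, x itself holds leaky_relu(x), and the second half of the output is leaky_relu(-leaky_relu(x)) rather than leaky_relu(-x). The compiled version reproduces this with the second kernel, which writes back into arg0_1. A sketch of the resulting layout:

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
ref = x.clone()                     # keep a copy: forward mutates x in place
out = CReLU()(x)
assert out.shape == (4, 8, 4, 4)    # channel dimension doubles
assert torch.allclose(out[:, :4], F.leaky_relu(ref, 0.01))
assert torch.allclose(out[:, 4:], F.leaky_relu(-F.leaky_relu(ref, 0.01), 0.01))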
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 0.01 tmp9 = tmp5 * tmp8 tmp10 = tl.where(tmp7, tmp5, tmp9) tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp4, tmp10, tmp11) tmp13 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp16 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp13 & xmask, other=0.0) tmp17 = tmp16 > tmp6 tmp18 = tmp16 * tmp8 tmp19 = tl.where(tmp17, tmp16, tmp18) tmp20 = -tmp19 tmp21 = tmp20 > tmp6 tmp22 = tmp20 * tmp8 tmp23 = tl.where(tmp21, tmp20, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp13, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp12, tmp25) tl.store(out_ptr0 + x3, tmp26, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr1 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) triton_poi_fused_leaky_relu_1[grid(256)](arg0_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class CReLUNew(nn.Module): def __init__(self): super(CReLUNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dipikakhullar/ocr
CReLU
false
15,179
[ "MIT" ]
284
a55e70d82f42803be5ed63f8f59e4fa597fcf8d6
https://github.com/dipikakhullar/ocr/tree/a55e70d82f42803be5ed63f8f59e4fa597fcf8d6
ResBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/tf/ctfli7vqflj6i63a7y5mg3d6qberai23wf7yzknmpncgz4s6xcpo.py # Topologically Sorted Source Nodes: [x, pad], Original ATen: [aten.relu, aten.constant_pad_nd] # Source node to ATen node mapping: # pad => constant_pad_nd # x => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {}) # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%relu, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_relu_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = 
(xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tl.store(out_ptr0 + (x4), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mw/cmwpr6nqmwswkubo27tfnt72rvkkjzfbb77g7avbkx2pnfjlim6s.py # Topologically Sorted Source Nodes: [x_1, x_2, pad_1], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd] # Source node to ATen node mapping: # pad_1 => constant_pad_nd_1 # x_1 => convolution # x_2 => relu_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%relu_1, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_convolution_relu_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x4 = (xindex // 36) x2 = (xindex // 36) % 4 x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x4)), tmp10 & xmask, other=0.0) tmp12 = tl.load(in_ptr1 + (x2), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tl.store(out_ptr0 + (x6), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yl/cyl57twtgf3lzd5sst7snomgtzysir6mpvrzx6jm7k4lxpcq6sru.py # Topologically Sorted Source Nodes: [x_3, out], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # out => add # x_3 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {}) triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/64/c64kbll2vwngzupaesjb3bkeqvvmff5z5c4ptcrwvfprjnzaxdkv.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => convolution # x_2 => relu_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, 
[0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x, pad], Original ATen: [aten.relu, aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_relu_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2, 
pad_1], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_convolution_relu_1.run(buf1, primals_3, buf2, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_3, out], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_2.run(buf4, primals_5, primals_1, 256, grid=grid(256), stream=stream0) del primals_1 del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_3.run(buf1, primals_3, buf5, 256, grid=grid(256), stream=stream0) del buf1 del primals_3 return (buf4, primals_2, primals_4, buf0, buf2, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from functools import partial import torch.nn as nn def dispatcher(dispatch_fn): def decorated(key, *args): if callable(key): return key if key is None: key = 'none' return dispatch_fn(key, *args) return decorated @dispatcher def activ_dispatch(activ): return {'none': nn.Identity, 'relu': nn.ReLU, 'lrelu': partial(nn. LeakyReLU, negative_slope=0.2)}[activ.lower()] @dispatcher def norm_dispatch(norm): return {'none': nn.Identity, 'in': partial(nn.InstanceNorm2d, affine= False), 'bn': nn.BatchNorm2d}[norm.lower()] @dispatcher def pad_dispatch(pad_type): return {'zero': nn.ZeroPad2d, 'replicate': nn.ReplicationPad2d, 'reflect': nn.ReflectionPad2d}[pad_type.lower()] def spectral_norm(module): """ init & apply spectral norm """ nn.init.xavier_uniform_(module.weight, 2 ** 0.5) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() return nn.utils.spectral_norm(module) @dispatcher def w_norm_dispatch(w_norm): return {'spectral': spectral_norm, 'none': lambda x: x}[w_norm.lower()] class ConvBlock(nn.Module): """Pre-activate conv block""" def __init__(self, C_in, C_out, kernel_size=3, stride=1, padding=1, norm='none', activ='relu', bias=True, upsample=False, downsample= False, w_norm='none', pad_type='zero', dropout=0.0): if kernel_size == 1: assert padding == 0 super().__init__() self.C_in = C_in self.C_out = C_out activ = activ_dispatch(activ) norm = norm_dispatch(norm) w_norm = w_norm_dispatch(w_norm) pad = pad_dispatch(pad_type) self.upsample = upsample self.downsample = downsample self.norm = norm(C_in) self.activ = activ() if dropout > 0.0: self.dropout = nn.Dropout2d(p=dropout) self.pad = pad(padding) self.conv = w_norm(nn.Conv2d(C_in, C_out, kernel_size, stride, bias =bias)) def forward(self, x): x = self.norm(x) x = self.activ(x) if self.upsample: x = F.interpolate(x, scale_factor=2) if hasattr(self, 'dropout'): x = self.dropout(x) x = self.conv(self.pad(x)) if self.downsample: x = F.avg_pool2d(x, 2) return x class ResBlock(nn.Module): """Pre-activate residual block""" def __init__(self, C_in, C_out, kernel_size=3, padding=1, upsample= False, downsample=False, norm='none', w_norm='none', activ='relu', pad_type='zero', dropout=0.0): assert not (upsample and downsample) super().__init__() w_norm = w_norm_dispatch(w_norm) self.C_in = C_in self.C_out = C_out self.upsample = upsample self.downsample = downsample self.conv1 = ConvBlock(C_in, C_out, kernel_size, 1, padding, norm, activ, upsample=upsample, w_norm=w_norm, pad_type=pad_type, dropout=dropout) self.conv2 = ConvBlock(C_out, C_out, kernel_size, 1, padding, norm, activ, w_norm=w_norm, pad_type=pad_type, dropout=dropout) if C_in != C_out or upsample or downsample: self.skip = w_norm(nn.Conv2d(C_in, C_out, 1)) def forward(self, x): out = x out = self.conv1(out) out = self.conv2(out) if self.downsample: out = F.avg_pool2d(out, 2) if hasattr(self, 'skip'): if self.upsample: x = F.interpolate(x, scale_factor=2) x = self.skip(x) if self.downsample: x = F.avg_pool2d(x, 2) out = out + x return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'C_in': 4, 'C_out': 4}]
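With C_in == C_out and no up/downsampling, the block registers no skip conv, so the shortcut is the identity and the spatial shape is preserved (kernel 3, stride 1, padding 1). A quick sanity check:

import torch

block = ResBlock(C_in=4, C_out=4)
x = torch.randn(4, 4, 4, 4)
assert block(x).shape == x.shape
assert not hasattr(block, 'skip')  # identity shortcut, no 1x1 conv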
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn.functional as F from functools import partial import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tl.store(out_ptr0 + x4, tmp15, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x4 = xindex // 36 x2 = xindex // 36 % 4 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x4), tmp10 & xmask, other=0.0) tmp12 = tl.load(in_ptr1 + x2, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tl.store(out_ptr0 + x6, tmp17, xmask) @triton.jit def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) 
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_relu_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_relu_1[grid(576)](buf1, primals_3, buf2, 576, XBLOCK=128, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_add_convolution_2[grid(256)](buf4, primals_5, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_3[grid(256)](buf1, primals_3, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 return buf4, primals_2, primals_4, buf0, buf2, buf5 def dispatcher(dispatch_fn): def decorated(key, *args): if callable(key): return key if key is None: key = 'none' return dispatch_fn(key, *args) return decorated @dispatcher def activ_dispatch(activ): return {'none': nn.Identity, 'relu': nn.ReLU, 'lrelu': partial(nn. LeakyReLU, negative_slope=0.2)}[activ.lower()] @dispatcher def norm_dispatch(norm): return {'none': nn.Identity, 'in': partial(nn.InstanceNorm2d, affine= False), 'bn': nn.BatchNorm2d}[norm.lower()] @dispatcher def pad_dispatch(pad_type): return {'zero': nn.ZeroPad2d, 'replicate': nn.ReplicationPad2d, 'reflect': nn.ReflectionPad2d}[pad_type.lower()] def spectral_norm(module): """ init & apply spectral norm """ nn.init.xavier_uniform_(module.weight, 2 ** 0.5) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() return nn.utils.spectral_norm(module) @dispatcher def w_norm_dispatch(w_norm): return {'spectral': spectral_norm, 'none': lambda x: x}[w_norm.lower()] class ConvBlock(nn.Module): """Pre-activate conv block""" def __init__(self, C_in, C_out, kernel_size=3, stride=1, padding=1, norm='none', activ='relu', bias=True, upsample=False, downsample= False, w_norm='none', pad_type='zero', dropout=0.0): if kernel_size == 1: assert padding == 0 super().__init__() self.C_in = C_in self.C_out = C_out activ = activ_dispatch(activ) norm = norm_dispatch(norm) w_norm = w_norm_dispatch(w_norm) pad = pad_dispatch(pad_type) self.upsample = upsample self.downsample = downsample self.norm = norm(C_in) self.activ = activ() if dropout > 0.0: self.dropout = nn.Dropout2d(p=dropout) self.pad = pad(padding) self.conv = w_norm(nn.Conv2d(C_in, C_out, kernel_size, stride, bias =bias)) def forward(self, x): x = self.norm(x) x = self.activ(x) if self.upsample: x = F.interpolate(x, scale_factor=2) if hasattr(self, 'dropout'): x = self.dropout(x) x = self.conv(self.pad(x)) if self.downsample: x = F.avg_pool2d(x, 2) return x class ResBlockNew(nn.Module): """Pre-activate residual block""" def __init__(self, C_in, C_out, kernel_size=3, padding=1, upsample= False, downsample=False, norm='none', w_norm='none', activ='relu', 
pad_type='zero', dropout=0.0): assert not (upsample and downsample) super().__init__() w_norm = w_norm_dispatch(w_norm) self.C_in = C_in self.C_out = C_out self.upsample = upsample self.downsample = downsample self.conv1 = ConvBlock(C_in, C_out, kernel_size, 1, padding, norm, activ, upsample=upsample, w_norm=w_norm, pad_type=pad_type, dropout=dropout) self.conv2 = ConvBlock(C_out, C_out, kernel_size, 1, padding, norm, activ, w_norm=w_norm, pad_type=pad_type, dropout=dropout) if C_in != C_out or upsample or downsample: self.skip = w_norm(nn.Conv2d(C_in, C_out, 1)) def forward(self, input_0): primals_2 = self.conv1.conv.weight primals_3 = self.conv1.conv.bias primals_4 = self.conv2.conv.weight primals_5 = self.conv2.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
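For reference, a minimal eager-mode sketch of what the fused call above computes for this traced configuration (C_in == C_out, no up/downsampling, norm='none'): pre-activate with ReLU, zero-pad, 3x3 conv, twice, then add the identity skip. Shapes and weights below are illustrative assumptions, not values from the trace.

import torch
import torch.nn.functional as F

def resblock_eager(x, w1, b1, w2, b2):
    # ConvBlock order is norm -> activ -> pad -> conv; norm is Identity here.
    h = F.conv2d(F.pad(F.relu(x), (1, 1, 1, 1)), w1, b1)  # zero pad keeps H x W for the 3x3 conv
    h = F.conv2d(F.pad(F.relu(h), (1, 1, 1, 1)), w2, b2)
    return x + h  # identity skip: C_in == C_out and no resampling, so no 1x1 skip conv

x = torch.rand(4, 4, 4, 4)
w1, b1 = torch.rand(4, 4, 3, 3), torch.rand(4)
w2, b2 = torch.rand(4, 4, 3, 3), torch.rand(4)
assert resblock_eager(x, w1, b1, w2, b2).shape == x.shape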
derwind/dmfont
ResBlock
false
15180
[ "MIT" ]
95
17a91a9cc1917d2485eaa8e92b68245578920c76
https://github.com/derwind/dmfont/tree/17a91a9cc1917d2485eaa8e92b68245578920c76
IdentityPadding
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/i5/ci5jtsingp2qnrodx6wlbyak3tjanrom5rpybyruspgrcwq6flhn.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_1 => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class IdentityPadding(nn.Module): def __init__(self, in_channels, out_channels, stride): super(IdentityPadding, self).__init__() self.pooling = nn.MaxPool2d(1, stride=stride) self.add_channels = out_channels - in_channels def forward(self, x): out = F.pad(x, (0, 0, 0, 0, 0, self.add_channels)) out = self.pooling(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'stride': 1}]
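A small usage sketch with hypothetical shapes, showing the module doing real work (the traced example above uses in_channels == out_channels and stride=1, which makes it a no-op): zero channels are appended on the shortcut, then a strided 1x1 max pool halves the spatial dims.

import torch

pad = IdentityPadding(in_channels=4, out_channels=8, stride=2)
x = torch.rand(2, 4, 8, 8)
out = pad(x)
assert out.shape == (2, 8, 4, 4)   # 4 zero channels appended, H and W halved
assert torch.all(out[:, 4:] == 0)  # the padded channels stay zero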
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class IdentityPaddingNew(nn.Module): def __init__(self, in_channels, out_channels, stride): super(IdentityPaddingNew, self).__init__() self.pooling = nn.MaxPool2d(1, stride=stride) self.add_channels = out_channels - in_channels def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
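Because the traced configuration is the no-op case, the fused kernel above is a plain element-wise copy. A hedged equivalence check (requires a CUDA device, since call() pins everything to cuda:0):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = IdentityPadding(4, 4, 1)
compiled = IdentityPaddingNew(4, 4, 1)
assert torch.equal(eager(x), compiled(x))  # both reduce to the identity here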
dnddnjs/pytorch-vision
IdentityPadding
false
15181
[ "MIT" ]
48
d432b467774f838bef37372d6cff3576c6559803
https://github.com/dnddnjs/pytorch-vision/tree/d432b467774f838bef37372d6cff3576c6559803
BertSelfOutput
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ss/cssn3ayzwsxbizosd6ieezxafjef3fxscx57lbnlxbdiuph3p2je.py # Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean] # Source node to ATen node mapping: # add => add # u => mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) triton_poi_fused_add_mean_0 = async_compile.triton('triton_poi_fused_add_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, 
eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1)) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2)) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3)) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/l6/cl6vibrzoyykzmbhmvlsdaksh3k2diif7eg66z2ho46tjsy6emma.py # Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub] # Source node to ATen node mapping: # add => add # sub => sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {}) # %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {}) triton_poi_fused_add_sub_1 = async_compile.triton('triton_poi_fused_add_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4p/c4pnuv3rymhg72qutbvx7mkzv6t7edcefa73bt3nl66b4qtouu4a.py # Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add_1 => add_1 # hidden_states_2 => add_2 # mul => mul # pow_1 => pow_1 # s => mean_1 # sqrt => sqrt # x => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %div), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {}) triton_poi_fused_add_div_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_add_mean_0.run(buf0, primals_2, primals_4, buf1, 64, grid=grid(64), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub] triton_poi_fused_add_sub_1.run(buf2, primals_2, primals_4, buf1, 256, grid=grid(256), stream=stream0) del buf1 del primals_2 del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mean_mul_pow_sqrt_2.run(primals_5, buf2, primals_6, buf3, 256, grid=grid(256), stream=stream0) del primals_6 return (buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
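BertLayerNorm is numerically the same normalization as torch.nn.functional.layer_norm (biased variance, epsilon inside the square root), which a quick sketch can confirm; the tolerance below is an arbitrary choice:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
ln = BertLayerNorm(hidden_size=4)  # weight=ones, bias=zeros, eps=1e-12
ref = F.layer_norm(x, (4,), ln.weight, ln.bias, eps=1e-12)
assert torch.allclose(ln(x), ref, atol=1e-6)

Note that the compiled kernel below hard-codes the mock config's layer_norm_eps=1 as the literal 1.0 (tmp15), so it matches this module only for that epsilon.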
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + 2) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + 3) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * 
tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_2 del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5, buf2, primals_6, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2 class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfOutputNew(nn.Module): def __init__(self, config): super(BertSelfOutputNew, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_0, input_1): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_5 = self.LayerNorm.weight primals_6 = self.LayerNorm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
dfhby0/CBLUE
BertSelfOutput
false
15182
[ "Apache-2.0" ]
293
36bdb52f17c4379d4a5f8b407890ba294017b5e2
https://github.com/dfhby0/CBLUE/tree/36bdb52f17c4379d4a5f8b407890ba294017b5e2
TwoLayerNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ai/caitnpldotnv4k4oj67wyecd2ig4qcjrnmr35rmx6o2vxx245xs3.py # Topologically Sorted Source Nodes: [h_relu], Original ATen: [aten.clamp, aten.ge] # Source node to ATen node mapping: # h_relu => clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, 0), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_1, 0), kwargs = {}) triton_poi_fused_clamp_ge_0 = async_compile.triton('triton_poi_fused_clamp_ge_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) 
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [h_relu], Original ATen: [aten.clamp, aten.ge] stream0 = get_raw_stream(0) triton_poi_fused_clamp_ge_0.run(buf0, primals_2, buf1, buf3, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn class TwoLayerNet(torch.nn.Module): def __init__(self, D_in, H, D_out): super(TwoLayerNet, self).__init__() self.linear1 = torch.nn.Linear(D_in, H) self.linear2 = torch.nn.Linear(H, D_out) def forward(self, x): h_relu = self.linear1(x).clamp(min=0) y_pred = self.linear2(h_relu) return y_pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'D_in': 4, 'H': 4, 'D_out': 4}]
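.clamp(min=0) is exactly ReLU, so the compiled graph is linear -> ReLU -> linear. A minimal sketch of the forward pass it reproduces (the input is illustrative):

import torch

net = TwoLayerNet(D_in=4, H=4, D_out=4)
x = torch.rand(4, 4, 4, 4)
ref = net.linear2(torch.relu(net.linear1(x)))  # same computation as net(x)
assert torch.allclose(net(x), ref)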
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_clamp_ge_0[grid(256)](buf0, primals_2, buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = buf0 del buf0 extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3 class TwoLayerNetNew(torch.nn.Module): def __init__(self, D_in, H, D_out): super(TwoLayerNetNew, self).__init__() self.linear1 = torch.nn.Linear(D_in, H) self.linear2 = torch.nn.Linear(H, D_out) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
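The extra boolean buffer (buf3) saved by the fused kernel above is the h >= 0 mask the backward pass needs: the gradient of clamp(min=0) passes through exactly where the pre-activation is non-negative. A small autograd sketch of that fact (randn makes ties at exactly 0 a measure-zero event):

import torch

h = torch.randn(8, requires_grad=True)
h.clamp(min=0).sum().backward()
assert torch.equal(h.grad, (h >= 0).float())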
dionhaefner/delve
TwoLayerNet
false
15183
[ "MIT" ]
69
811756520cbfd8dce4427c53203ac193f61a94d1
https://github.com/dionhaefner/delve/tree/811756520cbfd8dce4427c53203ac193f61a94d1
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rh/crhy6nilvaajphuuoyup37xl4ncuiyrcb3fnt5aboux6wyvcg7ie.py # Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_qk => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ss/csssqqlybvm2dlovzxhxorvbhyaj46oqebdryly2s5obgs7rshwq.py # Topologically Sorted Source Nodes: [mul, scaled_attention_logits_1, attention_weights], Original ATen: [aten.mul, aten.add, aten._softmax] # Source node to ATen node mapping: # attention_weights => amax, div_1, exp, sub, sum_1 # mul => mul # scaled_attention_logits_1 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_10, -1000000000.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %mul), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_add_mul_1 = async_compile.triton('triton_per_fused__softmax_add_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_add_mul_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 256 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), 
xmask, other=0.0) tmp2 = -1000000000.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, float("-inf")) tmp8 = triton_helpers.max2(tmp7, 1)[:, None] tmp9 = tmp4 - tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + (16*x0)), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mz/cmzlu2lip25blpsdqeby7ek5757op6xw3pdkxbdediou5szw32tx.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # output_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_15,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ok/cokamvfj3z4xuz3jmalftfns3huimimr3c4gzm52vaybmdliglu4.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add] # Source node to ATen node mapping: # output_1 => add_1 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_12), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime 
import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4, 16, 16), (1024, 256, 16, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.clone] stream0 = get_raw_stream(0) 
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 16, grid=grid(16, 16), stream=stream0) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 16, grid=grid(16, 16), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, scaled_attention_logits_1, attention_weights], Original ATen: [aten.mul, aten.add, aten._softmax] triton_per_fused__softmax_add_mul_1.run(buf5, primals_10, buf8, 256, 16, grid=grid(256), stream=stream0) del buf5 del primals_10 buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_8, buf9, 16, 16, grid=grid(16, 16), stream=stream0) del primals_8 buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf10, buf11, 64, 4, grid=grid(64, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add] triton_poi_fused_add_3.run(buf13, primals_12, 256, grid=grid(256), stream=stream0) del primals_12 return (buf13, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), primals_11, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = 
rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4, 16, 16), (1024, 256, 16, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np def scaled_dot_product_attention(q, k, v, mask): matmul_qk = torch.matmul(q, k.permute(0, 1, 3, 2)) dk = k.shape[-1] scaled_attention_logits = matmul_qk / np.sqrt(dk) if mask is not None: scaled_attention_logits += mask * -1000000000.0 attention_weights = torch.softmax(scaled_attention_logits, dim=-1) output = torch.matmul(attention_weights, v) return output class MultiHeadAttention(torch.nn.Module): def __init__(self, d_model_size, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model_size = d_model_size self.depth = int(d_model_size / self.num_heads) self.Wq = torch.nn.Linear(d_model_size, d_model_size) self.Wk = torch.nn.Linear(d_model_size, d_model_size) self.Wv = torch.nn.Linear(d_model_size, d_model_size) self.dense = torch.nn.Linear(d_model_size, d_model_size) def split_into_heads(self, x, batch_size): x = x.reshape(batch_size, -1, self.num_heads, self.depth) return x.permute([0, 2, 1, 3]) def forward(self, v, k, q, mask): batch_size = q.shape[0] q = self.Wq(q) k = self.Wk(k) v = self.Wv(v) q = self.split_into_heads(q, batch_size) k = self.split_into_heads(k, batch_size) v = self.split_into_heads(v, batch_size) scaled_attention = scaled_dot_product_attention(q, k, v, mask).permute( [0, 2, 1, 3]) original_size_attention = scaled_attention.reshape(batch_size, -1, self.d_model_size) output = self.dense(original_size_attention) return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 16, 16])] def get_init_inputs(): return [[], {'d_model_size': 4, 'num_heads': 4}]
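A minimal usage sketch for the eager module above, using the shapes from get_inputs()/get_init_inputs(); it runs on CPU, since nothing in the module itself is CUDA-specific. With d_model_size=4 and num_heads=4 the per-head depth is 1, and the 4x4x4x4 inputs flatten to a sequence length of 16:

import torch

mha = MultiHeadAttention(d_model_size=4, num_heads=4)
v, k, q = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
mask = torch.rand(4, 4, 16, 16)  # added to the (batch, heads, 16, 16) attention logits

out = mha(v, k, q, mask)
print(out.shape)  # torch.Size([4, 16, 4]): (batch, seq_len, d_model_size)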
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_per_fused__softmax_add_mul_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = -1000000000.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, float('-inf')) tmp8 = triton_helpers.max2(tmp7, 1)[:, None] tmp9 = tmp4 - tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4, 16, 16), (1024, 256, 16, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_per_fused__softmax_add_mul_1[grid(256)](buf5, primals_10, buf8, 256, 16, XBLOCK=128, num_warps=8, num_stages=1) del buf5 del primals_10 buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf9, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_8 buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16, 1), 0), 
reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0) del buf12 triton_poi_fused_add_3[grid(256)](buf13, primals_12, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_12 return buf13, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0 ), primals_11, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0) def scaled_dot_product_attention(q, k, v, mask): matmul_qk = torch.matmul(q, k.permute(0, 1, 3, 2)) dk = k.shape[-1] scaled_attention_logits = matmul_qk / np.sqrt(dk) if mask is not None: scaled_attention_logits += mask * -1000000000.0 attention_weights = torch.softmax(scaled_attention_logits, dim=-1) output = torch.matmul(attention_weights, v) return output class MultiHeadAttentionNew(torch.nn.Module): def __init__(self, d_model_size, num_heads): super(MultiHeadAttentionNew, self).__init__() self.num_heads = num_heads self.d_model_size = d_model_size self.depth = int(d_model_size / self.num_heads) self.Wq = torch.nn.Linear(d_model_size, d_model_size) self.Wk = torch.nn.Linear(d_model_size, d_model_size) self.Wv = torch.nn.Linear(d_model_size, d_model_size) self.dense = torch.nn.Linear(d_model_size, d_model_size) def split_into_heads(self, x, batch_size): x = x.reshape(batch_size, -1, self.num_heads, self.depth) return x.permute([0, 2, 1, 3]) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.Wq.weight primals_3 = self.Wq.bias primals_4 = self.Wk.weight primals_5 = self.Wk.bias primals_7 = self.Wv.weight primals_8 = self.Wv.bias primals_11 = self.dense.weight primals_12 = self.dense.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
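MultiHeadAttentionNew reads its weights from the same nn.Linear submodules as the eager class, so a trained state dict loads directly. A small sketch exercising the compiled path; a CUDA device is assumed because call() allocates its buffers on cuda:0:

import torch

m = MultiHeadAttentionNew(d_model_size=4, num_heads=4).cuda()
v, k, q = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
mask = torch.rand(4, 4, 16, 16, device='cuda')

out = m(v, k, q, mask)
print(out.shape)  # torch.Size([4, 16, 4])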
depengchen123/ctrl
MultiHeadAttention
false
15,184
[ "BSD-3-Clause" ]
1,559
8673e9ec1bf6441ad8d793a626cdfd8c1fd9c4e4
https://github.com/depengchen123/ctrl/tree/8673e9ec1bf6441ad8d793a626cdfd8c1fd9c4e4
BatchNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/kt/cktr2fi4oyvatsyylkr2wbxli7afdvnkso4duaghvvhtpd2q6ygy.py # Topologically Sorted Source Nodes: [mean, sub, pow_1, mean_1, var, log, sum_1, logdet], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.log, aten.sum, aten.mul] # Source node to ATen node mapping: # log => log # logdet => mul # mean => mean # mean_1 => mean_1 # pow_1 => pow_1 # sub => sub # sum_1 => sum_1 # var => add # Graph fragment: # %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0]), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%log,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -0.5), kwargs = {}) triton_per_fused_add_log_mean_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_log_mean_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), 
equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_log_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = tl_math.log(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = -0.5 tmp28 = tmp26 * tmp27 tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp8, None) tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp22, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp28, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xm/cxmt6duqqeteu4xwsym3p3vmedzj7nayhm62z7v7nl2hxjisngyy.py # Topologically Sorted Source Nodes: [sub_1, sqrt, u], Original ATen: [aten.sub, aten.sqrt, aten.div] # Source node to ATen node mapping: # sqrt => sqrt # sub_1 => sub_1 # u => div # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {}) triton_poi_fused_div_sqrt_sub_1 = async_compile.triton('triton_poi_fused_div_sqrt_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sqrt_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = libdevice.sqrt(tmp3) tmp5 = tmp2 / tmp4 tl.store(out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [mean, sub, pow_1, mean_1, var, log, sum_1, logdet], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.log, aten.sum, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_log_mean_mul_pow_sub_sum_0.run(buf4, arg0_1, buf0, buf1, 1, 64, grid=grid(1), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_1, sqrt, u], Original ATen: [aten.sub, aten.sqrt, aten.div] triton_poi_fused_div_sqrt_sub_1.run(arg0_1, buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf2, buf4, buf1, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import logging
from abc import abstractmethod
from torch import tensor
import torch.nn as nn
import numpy.random as rng

logger = logging.getLogger(__name__)


class BaseFlow(nn.Module):
    """Base class for normalizing flows."""

    def __init__(self, n_inputs, **kwargs):
        super().__init__()
        self.n_inputs = n_inputs

    @abstractmethod
    def forward(self, x, **kwargs):
        raise NotImplementedError()

    @abstractmethod
    def generate_samples(self, n_samples=1, u=None, **kwargs):
        raise NotImplementedError()

    def log_likelihood(self, x, **kwargs):
        """Calculates log p(x) with a Gaussian base density"""
        u, logdet_dudx = self.forward(x, **kwargs)
        constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))
        log_likelihood = constant - 0.5 * torch.sum(u ** 2, dim=1) + logdet_dudx
        return u, log_likelihood

    def log_likelihood_and_score(self, x, **kwargs):
        """Calculates log p(x) and t(x) with a Gaussian base density"""
        u, log_likelihood = self.log_likelihood(x, **kwargs)
        return u, log_likelihood, None


class BatchNorm(BaseFlow):
    """BatchNorm implementation"""

    def __init__(self, n_inputs, alpha=0.1, eps=1e-05):
        super().__init__(n_inputs)
        self.n_inputs = n_inputs
        self.alpha = alpha
        self.eps = eps
        self.calculated_running_mean = False
        self.running_mean = torch.zeros(self.n_inputs)
        self.running_var = torch.zeros(self.n_inputs)

    def forward(self, x, fixed_params=False):
        """Calculates x -> u(x) (batch norming)"""
        if fixed_params:
            mean = self.running_mean
            var = self.running_var
        else:
            mean = torch.mean(x, dim=0)
            var = torch.mean((x - mean) ** 2, dim=0) + self.eps
            if not self.calculated_running_mean:
                self.running_mean = mean
                self.running_var = var
            else:
                self.running_mean = (1.0 - self.alpha) * self.running_mean + self.alpha * mean
                self.running_var = (1.0 - self.alpha) * self.running_var + self.alpha * var
            self.calculated_running_mean = True
        u = (x - mean) / torch.sqrt(var)
        logdet = -0.5 * torch.sum(torch.log(var))
        return u, logdet

    def inverse(self, u):
        """Calculates u -> x(u) (the approximate inverse transformation based
        on running mean and variance)"""
        return torch.sqrt(self.running_var) * u + self.running_mean

    def generate_samples(self, n_samples=1, u=None, **kwargs):
        if u is None:
            u = tensor(rng.randn(n_samples, self.n_inputs))
        return torch.sqrt(self.running_var) * u + self.running_mean

    def to(self, *args, **kwargs):
        # Move the module and its plain-tensor running statistics together;
        # the previous `self = super()` rebinding made the assignments no-ops.
        logger.debug('Transforming BatchNorm to %s', args)
        super().to(*args, **kwargs)
        self.running_mean = self.running_mean.to(*args, **kwargs)
        self.running_var = self.running_var.to(*args, **kwargs)
        return self


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_inputs': 4}]
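A short sketch of the flow interface above: forward standardises the batch and returns the log-determinant of the Jacobian, and log_likelihood combines both with a standard-normal base density. Shapes follow get_inputs(); this runs on CPU:

import torch

flow = BatchNorm(n_inputs=4)
x = torch.rand(4, 4, 4, 4)

u, logdet = flow(x)             # u: standardised batch, logdet: scalar tensor
_, ll = flow.log_likelihood(x)  # ll has shape (4, 4, 4) after the sum over dim=1
print(u.shape, float(logdet), ll.shape)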
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np from abc import abstractmethod from torch import tensor import torch.nn as nn import numpy.random as rng assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = tl_math.log(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = -0.5 tmp28 = tmp26 * tmp27 tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None) tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp22, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp28, None) @triton.jit def triton_poi_fused_div_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = libdevice.sqrt(tmp3) tmp5 = tmp2 / tmp4 tl.store(out_ptr0 + x2, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 get_raw_stream(0) triton_per_fused_add_log_mean_mul_pow_sub_sum_0[grid(1)](buf4, arg0_1, buf0, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_sqrt_sub_1[grid(256)](arg0_1, buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf2, buf4, buf1, buf0 class BaseFlow(nn.Module): """ """ def __init__(self, n_inputs, **kwargs): super().__init__() self.n_inputs = n_inputs @abstractmethod def forward(self, x, **kwargs): raise NotImplementedError() @abstractmethod def generate_samples(self, n_samples=1, u=None, **kwargs): raise NotImplementedError() def log_likelihood(self, x, **kwargs): """ Calculates log p(x) with a Gaussian base density """ u, logdet_dudx = self.forward(x, **kwargs) constant = float(-0.5 * self.n_inputs * np.log(2.0 
            * np.pi))
        log_likelihood = constant - 0.5 * torch.sum(u ** 2, dim=1) + logdet_dudx
        return u, log_likelihood

    def log_likelihood_and_score(self, x, **kwargs):
        """Calculates log p(x) and t(x) with a Gaussian base density"""
        u, log_likelihood = self.log_likelihood(x, **kwargs)
        return u, log_likelihood, None


class BatchNormNew(BaseFlow):
    """BatchNorm implementation"""

    def __init__(self, n_inputs, alpha=0.1, eps=1e-05):
        super().__init__(n_inputs)
        self.n_inputs = n_inputs
        self.alpha = alpha
        self.eps = eps
        self.calculated_running_mean = False
        self.running_mean = torch.zeros(self.n_inputs)
        self.running_var = torch.zeros(self.n_inputs)

    def inverse(self, u):
        """Calculates u -> x(u) (the approximate inverse transformation based
        on running mean and variance)"""
        return torch.sqrt(self.running_var) * u + self.running_mean

    def generate_samples(self, n_samples=1, u=None, **kwargs):
        if u is None:
            u = tensor(rng.randn(n_samples, self.n_inputs))
        return torch.sqrt(self.running_var) * u + self.running_mean

    def to(self, *args, **kwargs):
        # Move the module and its plain-tensor running statistics together;
        # the previous `self = super()` rebinding (and the undefined `logger`)
        # made the original version broken.
        super().to(*args, **kwargs)
        self.running_mean = self.running_mean.to(*args, **kwargs)
        self.running_var = self.running_var.to(*args, **kwargs)
        return self

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0], output[1]
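A hypothetical parity check between the eager flow and the Inductor wrapper: both should produce the same (u, logdet) pair on a fresh batch, since the fused kernels reproduce the per-feature mean, the biased variance plus eps, and -0.5 * sum(log(var)). A CUDA device is assumed, as call() allocates on cuda:0:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
u_ref, logdet_ref = BatchNorm(n_inputs=4)(x)
u_new, logdet_new = BatchNormNew(n_inputs=4)(x)
print(torch.allclose(u_ref, u_new, atol=1e-6),
      torch.allclose(logdet_ref, logdet_new, atol=1e-6))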
diana-hep/madminer
BatchNorm
false
15,185
[ "MIT" ]
46
3a585d2887a31886cdeadddb0a284f0472146fce
https://github.com/diana-hep/madminer/tree/3a585d2887a31886cdeadddb0a284f0472146fce
LayerCake
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ai/caitnpldotnv4k4oj67wyecd2ig4qcjrnmr35rmx6o2vxx245xs3.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clamp, aten.ge] # Source node to ATen node mapping: # x => clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, 0), kwargs = {}) # %ge_4 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_1, 0), kwargs = {}) triton_poi_fused_clamp_ge_0 = async_compile.triton('triton_poi_fused_clamp_ge_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = 
tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clamp, aten.ge] stream0 = get_raw_stream(0) triton_poi_fused_clamp_ge_0.run(buf0, primals_2, buf1, buf15, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clamp, aten.ge] triton_poi_fused_clamp_ge_0.run(buf2, primals_5, buf3, buf14, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clamp, aten.ge] triton_poi_fused_clamp_ge_0.run(buf4, primals_7, buf5, buf13, 256, grid=grid(256), stream=stream0) del primals_7 buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clamp, aten.ge] triton_poi_fused_clamp_ge_0.run(buf6, primals_9, buf7, buf12, 256, grid=grid(256), stream=stream0) del primals_9 buf8 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 
1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.clamp, aten.ge] triton_poi_fused_clamp_ge_0.run(buf8, primals_11, buf9, buf11, 256, grid=grid(256), stream=stream0) del primals_11 buf10 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_13 return (reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(buf9, (64, 4), (4, 1), 0), primals_12, buf11, primals_10, buf12, primals_8, buf13, primals_6, buf14, primals_4, buf15, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn


class LayerCake(torch.nn.Module):

    def __init__(self, D_in, H1, H2, H3, H4, H5, D_out):
        """
        In the constructor we instantiate six nn.Linear modules and assign
        them as member variables.
        """
        super(LayerCake, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H1)
        self.linear2 = torch.nn.Linear(H1, H2)
        self.linear3 = torch.nn.Linear(H2, H3)
        self.linear4 = torch.nn.Linear(H3, H4)
        self.linear5 = torch.nn.Linear(H4, H5)
        self.linear6 = torch.nn.Linear(H5, D_out)

    def forward(self, x):
        """
        In the forward function we accept a Tensor of input data and we must
        return a Tensor of output data. We can use Modules defined in the
        constructor as well as arbitrary (differentiable) operations on
        Tensors.
        """
        x = self.linear1(x).clamp(min=0)
        x = self.linear2(x).clamp(min=0)
        x = self.linear3(x).clamp(min=0)
        x = self.linear4(x).clamp(min=0)
        x = self.linear5(x).clamp(min=0)
        y_pred = self.linear6(x)
        return y_pred


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'D_in': 4, 'H1': 4, 'H2': 4, 'H3': 4, 'H4': 4, 'H5': 4,
        'D_out': 4}]
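A minimal sketch of the eager stack in use: each hidden layer is a Linear followed by clamp(min=0) (a ReLU), and with the all-4 configuration from get_init_inputs() the input shape is preserved, since nn.Linear acts on the last dimension only:

import torch

net = LayerCake(D_in=4, H1=4, H2=4, H3=4, H4=4, H5=4, D_out=4)
x = torch.rand(4, 4, 4, 4)
y = net(x)
print(y.shape)  # torch.Size([4, 4, 4, 4])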
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_clamp_ge_0[grid(256)](buf0, primals_2, buf1, buf15, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_clamp_ge_0[grid(256)](buf2, primals_5, buf3, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = buf2 del buf2 extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_clamp_ge_0[grid(256)](buf4, primals_7, buf5, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = buf4 del buf4 extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_clamp_ge_0[grid(256)](buf6, primals_9, buf7, buf12, 256, XBLOCK=256, num_warps=4, 
num_stages=1)
        del primals_9
        buf8 = buf6
        del buf6
        extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
        buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_clamp_ge_0[grid(256)](buf8, primals_11, buf9,
            buf11, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
        buf10 = buf8
        del buf8
        extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf10)
        del primals_13
    return (reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
        buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1),
        0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf9, (64, 4), (4, 1), 0), primals_12, buf11,
        primals_10, buf12, primals_8, buf13, primals_6, buf14, primals_4,
        buf15)


class LayerCakeNew(torch.nn.Module):

    def __init__(self, D_in, H1, H2, H3, H4, H5, D_out):
        """
        In the constructor we instantiate six nn.Linear modules and assign
        them as member variables.
        """
        super(LayerCakeNew, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H1)
        self.linear2 = torch.nn.Linear(H1, H2)
        self.linear3 = torch.nn.Linear(H2, H3)
        self.linear4 = torch.nn.Linear(H3, H4)
        self.linear5 = torch.nn.Linear(H4, H5)
        self.linear6 = torch.nn.Linear(H5, D_out)

    def forward(self, input_0):
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_4 = self.linear2.weight
        primals_5 = self.linear2.bias
        primals_6 = self.linear3.weight
        primals_7 = self.linear3.bias
        primals_8 = self.linear4.weight
        primals_9 = self.linear4.bias
        primals_10 = self.linear5.weight
        primals_11 = self.linear5.bias
        primals_12 = self.linear6.weight
        primals_13 = self.linear6.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
dionhaefner/delve
LayerCake
false
15,186
[ "MIT" ]
69
811756520cbfd8dce4427c53203ac193f61a94d1
https://github.com/dionhaefner/delve/tree/811756520cbfd8dce4427c53203ac193f61a94d1
DWT
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/lg/clgsnl67svqt4ib2oobfi2mssr54u53p7psjxx65a32h2fm6jrhb.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_2, %add_4, %add_6, %add_7], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 4) % 16 x0 = xindex % 2 x1 = (xindex // 2) % 2 x3 = (xindex // 64) x4 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((2*x0) + 
(8*x1) + (16*x2) + (64*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp8 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1) + (16*x2) + (64*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp8 * tmp6 tmp10 = tmp7 + tmp9 tmp11 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1) + (16*x2) + (64*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 * tmp6 tmp13 = tmp10 + tmp12 tmp14 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1) + (16*x2) + (64*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp14 * tmp6 tmp16 = tmp13 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp0 >= tmp3 tmp20 = tl.full([1], 8, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*((-4) + x2)) + (64*x3)), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp23 * tmp6 tmp25 = -tmp24 tmp26 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1) + (16*((-4) + x2)) + (64*x3)), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp27 = tmp26 * tmp6 tmp28 = tmp25 - tmp27 tmp29 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1) + (16*((-4) + x2)) + (64*x3)), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp30 = tmp29 * tmp6 tmp31 = tmp28 + tmp30 tmp32 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1) + (16*((-4) + x2)) + (64*x3)), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp33 = tmp32 * tmp6 tmp34 = tmp31 + tmp33 tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp22, tmp34, tmp35) tmp37 = tmp0 >= tmp20 tmp38 = tl.full([1], 12, tl.int64) tmp39 = tmp0 < tmp38 tmp40 = tmp37 & tmp39 tmp41 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*((-8) + x2)) + (64*x3)), tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp42 = tmp41 * tmp6 tmp43 = -tmp42 tmp44 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1) + (16*((-8) + x2)) + (64*x3)), tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp45 = tmp44 * tmp6 tmp46 = tmp43 + tmp45 tmp47 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1) + (16*((-8) + x2)) + (64*x3)), tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tmp47 * tmp6 tmp49 = tmp46 - tmp48 tmp50 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1) + (16*((-8) + x2)) + (64*x3)), tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp51 = tmp50 * tmp6 tmp52 = tmp49 + tmp51 tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp40, tmp52, tmp53) tmp55 = tmp0 >= tmp38 tmp56 = tl.full([1], 16, tl.int64) tmp57 = tmp0 < tmp56 tmp58 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*((-12) + x2)) + (64*x3)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp59 = tmp58 * tmp6 tmp60 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1) + (16*((-12) + x2)) + (64*x3)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp61 = tmp60 * tmp6 tmp62 = tmp59 - tmp61 tmp63 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1) + (16*((-12) + x2)) + (64*x3)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tmp63 * tmp6 tmp65 = tmp62 - tmp64 tmp66 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1) + (16*((-12) + x2)) + (64*x3)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp67 = tmp66 * tmp6 tmp68 = tmp65 + tmp67 tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype) tmp70 = tl.where(tmp55, tmp68, tmp69) tmp71 = tl.where(tmp40, tmp54, tmp70) tmp72 = tl.where(tmp22, tmp36, tmp71) tmp73 = tl.where(tmp4, tmp18, tmp72) tl.store(out_ptr0 + (x4), tmp73, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, 
= args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 2, 2), (64, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.fft class DWT(nn.Module): """ 2D Discrete Wavelet Transform as implemented in [1]_. References ---------- .. [1] Liu, Pengju, et al. “Multi-Level Wavelet-CNN for Image Restoration.” ArXiv:1805.07071 [Cs], May 2018. arXiv.org, http://arxiv.org/abs/1805.07071. """ def __init__(self): super().__init__() self.requires_grad = False def forward(self, x: 'torch.Tensor') ->torch.Tensor: x01 = x[:, :, 0::2, :] / 2 x02 = x[:, :, 1::2, :] / 2 x1 = x01[:, :, :, 0::2] x2 = x02[:, :, :, 0::2] x3 = x01[:, :, :, 1::2] x4 = x02[:, :, :, 1::2] x_LL = x1 + x2 + x3 + x4 x_HL = -x1 - x2 + x3 + x4 x_LH = -x1 + x2 - x3 + x4 x_HH = x1 - x2 - x3 + x4 return torch.cat((x_LL, x_HL, x_LH, x_HH), 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
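A worked example of the transform above: for an (N, C, H, W) input it returns (N, 4C, H/2, W/2) with the bands ordered LL, HL, LH, HH, and the LL band is the per-2x2-block sum divided by 2 (an unnormalised Haar average). The check follows directly from the slicing in forward, where each of x1..x4 is pre-divided by 2:

import torch

dwt = DWT()
x = torch.rand(4, 4, 4, 4)  # (N, C, H, W)
y = dwt(x)
print(y.shape)              # torch.Size([4, 16, 2, 2])

block_sum = (x[:, :, 0::2, 0::2] + x[:, :, 1::2, 0::2] +
             x[:, :, 0::2, 1::2] + x[:, :, 1::2, 1::2])
print(torch.allclose(y[:, :4], block_sum / 2))  # True up to float rounding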
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.fft assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 4 % 16 x0 = xindex % 2 x1 = xindex // 2 % 2 x3 = xindex // 64 x4 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp8 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1 + 16 * x2 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp8 * tmp6 tmp10 = tmp7 + tmp9 tmp11 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1 + 16 * x2 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 * tmp6 tmp13 = tmp10 + tmp12 tmp14 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1 + 16 * x2 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp14 * tmp6 tmp16 = tmp13 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp0 >= tmp3 tmp20 = tl.full([1], 8, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 * x3), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp23 * tmp6 tmp25 = -tmp24 tmp26 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 * x3), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp27 = tmp26 * tmp6 tmp28 = tmp25 - tmp27 tmp29 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 * x3), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp30 = tmp29 * tmp6 tmp31 = tmp28 + tmp30 tmp32 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 * x3), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp33 = tmp32 * tmp6 tmp34 = tmp31 + tmp33 tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp22, tmp34, tmp35) tmp37 = tmp0 >= tmp20 tmp38 = tl.full([1], 12, tl.int64) tmp39 = tmp0 < tmp38 tmp40 = tmp37 & tmp39 tmp41 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * (-8 + x2) + 64 * x3), tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp42 = tmp41 * tmp6 tmp43 = -tmp42 tmp44 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1 + 16 * (-8 + x2) + 64 * x3), tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp45 = tmp44 * tmp6 tmp46 = tmp43 + tmp45 tmp47 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1 + 16 * (-8 + x2) + 64 * x3), tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tmp47 * tmp6 tmp49 = tmp46 - tmp48 tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1 + 16 * (-8 + x2) + 64 * x3), tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp51 = tmp50 * tmp6 tmp52 = tmp49 + tmp51 tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype) tmp54 = tl.where(tmp40, tmp52, tmp53) tmp55 = tmp0 >= tmp38 tl.full([1], 16, tl.int64) tmp58 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * (-12 + x2) + 64 * x3), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp59 = tmp58 * tmp6 tmp60 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1 + 16 * (-12 + x2) + 64 * x3), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp61 = tmp60 * 
tmp6 tmp62 = tmp59 - tmp61 tmp63 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1 + 16 * (-12 + x2) + 64 * x3), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tmp63 * tmp6 tmp65 = tmp62 - tmp64 tmp66 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1 + 16 * (-12 + x2) + 64 * x3), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp67 = tmp66 * tmp6 tmp68 = tmp65 + tmp67 tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype) tmp70 = tl.where(tmp55, tmp68, tmp69) tmp71 = tl.where(tmp40, tmp54, tmp70) tmp72 = tl.where(tmp22, tmp36, tmp71) tmp73 = tl.where(tmp4, tmp18, tmp72) tl.store(out_ptr0 + x4, tmp73, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 2, 2), (64, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class DWTNew(nn.Module): """ 2D Discrete Wavelet Transform as implemented in [1]_. References ---------- .. [1] Liu, Pengju, et al. “Multi-Level Wavelet-CNN for Image Restoration.” ArXiv:1805.07071 [Cs], May 2018. arXiv.org, http://arxiv.org/abs/1805.07071. """ def __init__(self): super().__init__() self.requires_grad = False def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
directgroup/direct
DWT
false
15,187
[ "Apache-2.0" ]
55
78cdd530b3c93e31c11d8963880e6329f0989243
https://github.com/directgroup/direct/tree/78cdd530b3c93e31c11d8963880e6329f0989243
CReLU_IN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/de/cdevjdi7y6x3s4r3k3szbteg2ev2eakun5rtqomlfjaapyyqnqaf.py # Topologically Sorted Source Nodes: [cat, x, leaky_relu], Original ATen: [aten.cat, aten._native_batch_norm_legit, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # cat => cat # leaky_relu => gt, mul_2, where # x => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %neg], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_2), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_8, 0), kwargs = {}) triton_per_fused__native_batch_norm_legit_cat_leaky_relu_leaky_relu_backward_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_cat_leaky_relu_leaky_relu_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints 
import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_cat_leaky_relu_leaky_relu_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_cat_leaky_relu_leaky_relu_backward_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 32 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 8 r2 = rindex x1 = (xindex // 8) x3 = xindex tmp37 = tl.load(in_ptr1 + (x3 % 8), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr2 + (x3 % 8), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (r2 + (16*x0) + (64*x1)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1, 1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr0 + (r2 + (16*((-4) + x0)) + (64*x1)), tmp6 & xmask, other=0.0) tmp10 = -tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp6, tmp10, tmp11) tmp13 = tl.where(tmp4, tmp5, tmp12) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tl.full([XBLOCK, 1], 16, tl.int32) tmp22 = tmp21.to(tl.float32) tmp23 = tmp20 / tmp22 tmp24 = tmp14 - tmp23 tmp25 = tmp24 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tmp13 - tmp23 tmp31 = 16.0 tmp32 = tmp29 / tmp31 tmp33 = 1e-05 tmp34 = tmp32 + tmp33 tmp35 = libdevice.rsqrt(tmp34) tmp36 = tmp30 * tmp35 tmp38 = tmp36 * tmp37 tmp40 = tmp38 + tmp39 tmp41 = 0.0 tmp42 = tmp40 > tmp41 tmp43 = 0.01 tmp44 = tmp40 * tmp43 tmp45 = tl.where(tmp42, tmp40, tmp44) tmp46 = tmp45 > tmp41 tl.store(out_ptr0 + (r2 + (16*x3)), tmp13, xmask) tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp45, xmask) tl.store(out_ptr3 + (r2 + (16*x3)), tmp46, xmask) tl.store(out_ptr4 + (x3), tmp35, xmask) tl.store(out_ptr1 + (x3), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (8, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) buf5 = empty_strided_cuda((1, 32, 4, 4), (512, 16, 4, 1), torch.float32) buf6 = reinterpret_tensor(buf5, (4, 8, 4, 4), (128, 16, 4, 1), 0); del buf5 # reuse buf7 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) buf4 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [cat, x, leaky_relu], Original ATen: [aten.cat, aten._native_batch_norm_legit, aten.leaky_relu, aten.leaky_relu_backward] stream0 = get_raw_stream(0) triton_per_fused__native_batch_norm_legit_cat_leaky_relu_leaky_relu_backward_0.run(buf6, primals_1, primals_2, primals_3, buf0, buf1, buf7, buf4, 32, 16, grid=grid(32), stream=stream0) del primals_1 del primals_2 del primals_3 return (buf6, buf0, reinterpret_tensor(buf4, (32, ), (1, ), 0), buf7, reinterpret_tensor(buf1, (1, 32, 1, 1), (32, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class CReLU_IN(nn.Module): def __init__(self, channels): super(CReLU_IN, self).__init__() self.bn = nn.InstanceNorm2d(channels * 2, eps=1e-05, momentum=0.1, affine=True) def forward(self, x): cat = torch.cat((x, -x), 1) x = self.bn(cat) return F.leaky_relu(x, 0.01, inplace=True) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_cat_leaky_relu_leaky_relu_backward_0( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 8 r2 = rindex x1 = xindex // 8 x3 = xindex tmp37 = tl.load(in_ptr1 + x3 % 8, xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr2 + x3 % 8, xmask, eviction_policy='evict_last') tmp0 = x0 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (r2 + 16 * x0 + 64 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1, 1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (r2 + 16 * (-4 + x0) + 64 * x1), tmp6 & xmask, other=0.0) tmp10 = -tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp6, tmp10, tmp11) tmp13 = tl.where(tmp4, tmp5, tmp12) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tl.where(xmask, tmp14, 0) tmp17 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tl.full([XBLOCK, 1], 16, tl.int32) tmp22 = tmp21.to(tl.float32) tmp23 = tmp20 / tmp22 tmp24 = tmp14 - tmp23 tmp25 = tmp24 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tmp13 - tmp23 tmp31 = 16.0 tmp32 = tmp29 / tmp31 tmp33 = 1e-05 tmp34 = tmp32 + tmp33 tmp35 = libdevice.rsqrt(tmp34) tmp36 = tmp30 * tmp35 tmp38 = tmp36 * tmp37 tmp40 = tmp38 + tmp39 tmp41 = 0.0 tmp42 = tmp40 > tmp41 tmp43 = 0.01 tmp44 = tmp40 * tmp43 tmp45 = tl.where(tmp42, tmp40, tmp44) tmp46 = tmp45 > tmp41 tl.store(out_ptr0 + (r2 + 16 * x3), tmp13, xmask) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp45, xmask) tl.store(out_ptr3 + (r2 + 16 * x3), tmp46, xmask) tl.store(out_ptr4 + x3, tmp35, xmask) tl.store(out_ptr1 + x3, tmp23, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32 ) buf5 = empty_strided_cuda((1, 32, 4, 4), (512, 16, 4, 1), torch.float32 ) buf6 = reinterpret_tensor(buf5, (4, 8, 4, 4), (128, 16, 4, 1), 0) del buf5 buf7 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) buf4 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32 ) get_raw_stream(0) triton_per_fused__native_batch_norm_legit_cat_leaky_relu_leaky_relu_backward_0[ grid(32)](buf6, primals_1, primals_2, primals_3, buf0, buf1, buf7, buf4, 32, 16, XBLOCK=32, num_warps=4, num_stages=1) del primals_1 del primals_2 del primals_3 return buf6, buf0, reinterpret_tensor(buf4, (32,), (1,), 0 
), buf7, reinterpret_tensor(buf1, (1, 32, 1, 1), (32, 1, 1, 1), 0) class CReLU_INNew(nn.Module): def __init__(self, channels): super(CReLU_INNew, self).__init__() self.bn = nn.InstanceNorm2d(channels * 2, eps=1e-05, momentum=0.1, affine=True) def forward(self, input_0): primals_2 = self.bn.weight primals_3 = self.bn.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
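Editor's sketch (not part of the dataset row): a hedged equivalence check. CReLU_INNew should reproduce the eager module, leaky_relu(InstanceNorm2d(cat((x, -x), 1))), once the affine parameters are shared; both classes are defined in this entry, the helper name is hypothetical.

import torch

def _check_crelu_in():
    if not torch.cuda.is_available():
        return  # the compiled call() path requires a CUDA device
    torch.manual_seed(0)
    eager = CReLU_IN(4).cuda()
    fused = CReLU_INNew(4).cuda()
    # Share the InstanceNorm2d affine weight/bias between the two modules.
    fused.bn.load_state_dict(eager.bn.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')  # contiguous, as call() asserts
    out = fused(x)   # shape (4, 8, 4, 4): channels doubled by the cat
    ref = eager(x)
    assert torch.allclose(out, ref, atol=1e-5)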
dipikakhullar/ocr
CReLU_IN
false
15,188
[ "MIT" ]
284
a55e70d82f42803be5ed63f8f59e4fa597fcf8d6
https://github.com/dipikakhullar/ocr/tree/a55e70d82f42803be5ed63f8f59e4fa597fcf8d6
BinaryCrossEntropyLabelSmooth
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3g/c3gv6xrfwg4qrthsqrquvehzxb7istocuk2s7rdunfu7x66sjhbv.py # Topologically Sorted Source Nodes: [mul, target, binary_cross_entropy_with_logits], Original ATen: [aten.mul, aten.add, aten.binary_cross_entropy_with_logits] # Source node to ATen node mapping: # binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, mean, minimum, mul_1, neg, sub, sub_1, sub_2 # mul => mul # target => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.9), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) triton_per_fused_add_binary_cross_entropy_with_logits_mul_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp7 = tl.load(in_ptr1 + (r0), None) tmp1 = 0.9 tmp2 = tmp0 * tmp1 tmp3 = 0.1 tmp4 = tmp2 + tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp8 = tmp6 * tmp7 tmp9 = 0.0 tmp10 = triton_helpers.minimum(tmp9, tmp7) tmp11 = tl_math.abs(tmp7) tmp12 = -tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = libdevice.log1p(tmp13) tmp15 = tmp10 - tmp14 tmp16 = tmp8 - tmp15 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = 256.0 tmp21 = tmp19 / tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, target, binary_cross_entropy_with_logits], Original ATen: [aten.mul, aten.add, aten.binary_cross_entropy_with_logits] stream0 = get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_mul_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class BinaryCrossEntropyLabelSmooth(torch.nn.BCEWithLogitsLoss): def __init__(self, num_classes, epsilon=0.1, weight=None, size_average= None, reduce=None, reduction='mean', pos_weight=None): super(BinaryCrossEntropyLabelSmooth, self).__init__(weight, size_average, reduce, reduction, pos_weight) self.num_classes = num_classes self.epsilon = epsilon def forward(self, input, target): target = (1 - self.epsilon) * target + self.epsilon return super(BinaryCrossEntropyLabelSmooth, self).forward(input, target ) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_classes': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp7 = tl.load(in_ptr1 + r0, None) tmp1 = 0.9 tmp2 = tmp0 * tmp1 tmp3 = 0.1 tmp4 = tmp2 + tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp8 = tmp6 * tmp7 tmp9 = 0.0 tmp10 = triton_helpers.minimum(tmp9, tmp7) tmp11 = tl_math.abs(tmp7) tmp12 = -tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = libdevice.log1p(tmp13) tmp15 = tmp10 - tmp14 tmp16 = tmp8 - tmp15 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = 256.0 tmp21 = tmp19 / tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_mul_0[grid(1)]( buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BinaryCrossEntropyLabelSmoothNew(torch.nn.BCEWithLogitsLoss): def __init__(self, num_classes, epsilon=0.1, weight=None, size_average= None, reduce=None, reduction='mean', pos_weight=None): super(BinaryCrossEntropyLabelSmoothNew, self).__init__(weight, size_average, reduce, reduction, pos_weight) self.num_classes = num_classes self.epsilon = epsilon def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
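Editor's sketch (not part of the dataset row): a worked check of the label-smoothing math. With the default epsilon = 0.1, the traced graph computes mean BCE-with-logits against the smoothed target 0.9 * target + 0.1; per the graph fragment above, in_ptr0 (arg0_1) is the tensor that gets smoothed (the target) while in_ptr1 carries the logits. The eager module is from this entry; the helper name is hypothetical.

import torch
import torch.nn.functional as F

def _check_bce_label_smooth():
    inp = torch.rand(4, 4, 4, 4)  # logits
    tgt = torch.rand(4, 4, 4, 4)
    loss = BinaryCrossEntropyLabelSmooth(num_classes=4)(inp, tgt)
    # Reference: plain BCE-with-logits on the smoothed target.
    ref = F.binary_cross_entropy_with_logits(inp, 0.9 * tgt + 0.1)
    assert torch.allclose(loss, ref, atol=1e-6)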
dianjixz/AutoDL
BinaryCrossEntropyLabelSmooth
false
15,189
[ "Apache-2.0" ]
1,044
48db4eb04d55ce69e93d4a3bdc24592bdb34a868
https://github.com/dianjixz/AutoDL/tree/48db4eb04d55ce69e93d4a3bdc24592bdb34a868
ProteinResNetPooler
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rq/crq67an2qbnihu4lgijwmlkbxtlerrf4ah6jnhhssto73bmo5zvy.py # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_weights => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 - tmp0 tmp2 = tl_math.exp(tmp1) tmp3 = tmp2 / tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2k/c2kiox2wvshockbbzjlycxwhjeigavlrfwuvcpbcbxpipbm7d7k6.py # Topologically Sorted Source Nodes: [pooled_output_1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # pooled_output_1 => tanh # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, 
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf1, buf2, 16, grid=grid(16), stream=stream0) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_weights, matmul], Original ATen: [aten._softmax, aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), buf2, out=buf3) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [pooled_output_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf5, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 return (buf5, primals_3, buf1, reinterpret_tensor(buf3, (4, 4), (4, 1), 0), buf5, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch from torch import nn class ProteinResNetPooler(nn.Module): def __init__(self, config): super().__init__() self.attention_weights = nn.Linear(config.hidden_size, 1) self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states, mask=None): attention_scores = self.attention_weights(hidden_states) if mask is not None: attention_scores += -10000.0 * (1 - mask) attention_weights = torch.softmax(attention_scores, -1) weighted_mean_embedding = torch.matmul(hidden_states.transpose(1, 2 ), attention_weights).squeeze(2) pooled_output = self.dense(weighted_mean_embedding) pooled_output = self.activation(pooled_output) return pooled_output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 - tmp0 tmp2 = tl_math.exp(tmp1) tmp3 = tmp2 / tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), buf2, out=buf3) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 extern_kernels.mm(reinterpret_tensor(buf3, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_tanh_1[grid(16)](buf5, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return buf5, primals_3, buf1, reinterpret_tensor(buf3, (4, 4), (4, 1), 0 ), buf5, primals_4 class ProteinResNetPoolerNew(nn.Module): def __init__(self, config): super().__init__() self.attention_weights = nn.Linear(config.hidden_size, 1) self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, input_0): primals_1 = self.attention_weights.weight primals_2 = self.attention_weights.bias primals_4 = self.dense.weight primals_5 = self.dense.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
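Editor's note (not part of the dataset row): attention_scores here has shape (batch, seq_len, 1), so softmax over the last dimension is identically 1; that is why Inductor folded it into triton_poi_fused__softmax_0 computing exp(x - x) / exp(x - x). A hypothetical usage sketch, with SimpleNamespace standing in for _paritybench_helpers._mock_config:

import torch
from types import SimpleNamespace

def _run_pooler():
    if not torch.cuda.is_available():
        return  # the compiled call() path requires a CUDA device
    config = SimpleNamespace(hidden_size=4)
    pooler = ProteinResNetPoolerNew(config).cuda()
    hidden_states = torch.rand(4, 4, 4, device='cuda')
    pooled = pooler(hidden_states)  # tanh(dense(weighted mean embedding))
    assert pooled.shape == (4, 4)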
IC-hub/ProteinLM
ProteinResNetPooler
false
15,190
[ "Apache-2.0" ]
59
58fbf1f674569cf814becf32f71dd0d8f0c592fa
https://github.com/IC-hub/ProteinLM/tree/58fbf1f674569cf814becf32f71dd0d8f0c592fa
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/a3/ca3vvpqpywk4rvgx62dwfu3n3zclaw4nyjbkbdcjlgl3synpwaxy.py # Topologically Sorted Source Nodes: [input_3, target_1, mul_2, a, mul_3, sum_2, mul_4, sum_3], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # a => sum_1 # input_3 => mul # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # sum_2 => sum_2 # sum_3 => sum_3 # target_1 => mul_1 # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_2), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %mul_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %mul), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %mul_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {}) triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp4 = tl.load(in_ptr2 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp4 * tmp2 tmp6 = tmp3 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp3 * tmp3 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = tmp5 * tmp5 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tl.store(out_ptr0 + (x0), tmp10, xmask) tl.store(out_ptr1 + (x0), tmp15, xmask) tl.store(out_ptr2 + (x0), tmp20, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/p4/cp4zb7h34ju4tzmweu56zah2tphr5lah7rtpq6penelo2tjsjqfm.py # Topologically Sorted Source Nodes: [mul_5, b, c, add_2, d, loss, loss_1, loss_2], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.mean] # Source node to ATen node mapping: # add_2 => add_2 # b => add # c => add_1 # d => div # loss => sub # loss_1 => mul_6 # loss_2 => mean # mul_5 => mul_5 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 0.001), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, 0.001), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %add_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_5, %add_2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 1.0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_6,), kwargs = {}) triton_per_fused_add_div_mean_mul_rsub_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp6 = tl.load(in_ptr2 + (r0), None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp4 = 0.001 tmp5 = tmp3 + tmp4 tmp7 = tmp6 + tmp4 tmp8 = tmp5 + tmp7 tmp9 = tmp2 / tmp8 tmp10 = 1.0 tmp11 = tmp10 - tmp9 tmp12 = tmp11 * tmp10 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tmp16 = 4.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [input_3, target_1, mul_2, a, mul_3, sum_2, mul_4, sum_3], Original ATen: [aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_mul_sum_0.run(arg0_1, arg2_1, arg1_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 del arg2_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [mul_5, b, c, add_2, d, loss, loss_1, loss_2], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.mean] triton_per_fused_add_div_mean_mul_rsub_1.run(buf4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0) del buf0 del buf1 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = 
rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class DiceLoss(nn.Module): def __init__(self, loss_weight=1.0): super(DiceLoss, self).__init__() self.loss_weight = loss_weight def forward(self, input, target, mask, reduce=True): batch_size = input.size(0) input = torch.sigmoid(input) input = input.contiguous().view(batch_size, -1) target = target.contiguous().view(batch_size, -1).float() mask = mask.contiguous().view(batch_size, -1).float() input = input * mask target = target * mask a = torch.sum(input * target, dim=1) b = torch.sum(input * input, dim=1) + 0.001 c = torch.sum(target * target, dim=1) + 0.001 d = 2 * a / (b + c) loss = 1 - d loss = self.loss_weight * loss if reduce: loss = torch.mean(loss) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp4 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp4 * tmp2 tmp6 = tmp3 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp3 * tmp3 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = tmp5 * tmp5 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) tl.store(out_ptr2 + x0, tmp20, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp4 = 0.001 tmp5 = tmp3 + tmp4 tmp7 = tmp6 + tmp4 tmp8 = tmp5 + tmp7 tmp9 = tmp2 / tmp8 tmp10 = 1.0 tmp11 = tmp10 - tmp9 tmp12 = tmp11 * tmp10 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tmp16 = 4.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg2_1, arg1_1, buf0, buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_mean_mul_rsub_1[grid(1)](buf4, buf0, buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, class DiceLossNew(nn.Module): def __init__(self, loss_weight=1.0): super(DiceLossNew, self).__init__() self.loss_weight = loss_weight def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
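Editor's sketch (not part of the dataset row): with an all-ones mask the two fused kernels reduce to the batched dice formula 1 - 2*sum(p*t) / (sum(p*p) + sum(t*t) + 0.002) with p = sigmoid(input), averaged over the batch. A hypothetical sanity check against DiceLossNew from this entry:

import torch

def _check_dice_loss():
    if not torch.cuda.is_available():
        return  # the compiled call() path requires a CUDA device
    inp = torch.rand(4, 4, 4, 4, device='cuda')
    tgt = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.ones_like(tgt)
    p = torch.sigmoid(inp).view(4, -1)
    t = tgt.view(4, -1)
    # b + c folds the two 0.001 smoothing terms into a single 0.002.
    ref = (1 - 2 * (p * t).sum(1) /
           ((p * p).sum(1) + (t * t).sum(1) + 0.002)).mean()
    assert torch.allclose(DiceLossNew()(inp, tgt, mask), ref, atol=1e-6)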
doem97/PSENet
DiceLoss
false
15,192
[ "Apache-2.0" ]
1,213
4d95395658662f2223805c36dcd573d9e190ce26
https://github.com/doem97/PSENet/tree/4d95395658662f2223805c36dcd573d9e190ce26
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/a2/ca2wr2cvkya5clovpxidv7ia56pdcyp7uq4omtpg5m2nr7ya3ryn.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (2, 64), (64, 1)) assert_size_stride(primals_7, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 2), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 return (reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.fc1 = nn.Linear(4, 64) self.fc2 = nn.Linear(64, 64) self.fc3 = nn.Linear(64, 2) def forward(self, x): x = torch.tanh(self.fc1(x)) x = torch.tanh(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (2, 64), (64, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(4096)](buf1, primals_2, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(4096)](buf3, primals_5, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 2), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, primals_6, primals_4 class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.fc1 = nn.Linear(4, 64) self.fc2 = nn.Linear(64, 64) self.fc3 = nn.Linear(64, 2) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
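The cleaned triton_poi_fused_tanh_0 above fuses the linear layer's bias add with the tanh activation, updating the matmul output in place (x0 = xindex % 64 selects the bias element for each column). A minimal sketch of launching it directly on synthetic buffers, assuming a CUDA device with Triton installed and the kernel definition from this record in scope; the grid and XBLOCK values mirror the call wrapper:

import torch

# Assumes triton_poi_fused_tanh_0 from the record above is defined and a
# CUDA device is available.
buf = torch.rand(64, 64, device='cuda')   # stands in for the (64, 64) matmul output
bias = torch.rand(64, device='cuda')      # stands in for primals_2
ref = torch.tanh(buf + bias)              # eager reference: tanh(x @ W.T + b)

# 16 programs * XBLOCK=256 lanes cover all 4096 elements, as in the wrapper.
triton_poi_fused_tanh_0[(16,)](buf, bias, 4096, XBLOCK=256)
print(torch.allclose(buf, ref))           # True: buf was rewritten in place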
dongminlee94/Samsung-DRL-Code
Net
false
15,193
[ "MIT" ]
116
c96f8739a09cfd708c265954ee8ecf0ea3b67395
https://github.com/dongminlee94/Samsung-DRL-Code/tree/c96f8739a09cfd708c265954ee8ecf0ea3b67395
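Putting the record together: call flattens the (4, 4, 4, 4) input to (64, 4) for the matmuls, runs the fused bias+tanh kernel twice, and reinterprets the final addmm result back to (4, 4, 4, 2), so it should agree with the eager Net up to floating-point tolerance. A minimal equivalence check, assuming a CUDA device and the Net and call definitions from this record in scope (call clears its argument list, so a fresh list is passed):

import torch

torch.manual_seed(0)
model = Net().cuda()                      # eager model from the record above
x = torch.rand(4, 4, 4, 4, device='cuda')
expected = model(x)

# Parameters in the order the wrapper asserts them (fc1, input, fc2, fc3).
args = [model.fc1.weight, model.fc1.bias, x,
        model.fc2.weight, model.fc2.bias,
        model.fc3.weight, model.fc3.bias]
actual = call(args)[0]                    # first return value is the output
print(torch.allclose(expected, actual, atol=1e-6))  # True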
MNISTClassifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/36/c36goqekbheqmzqx63ibehvw5xzi6nve5f33bertb3dmpfgep4fh.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) 
tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/j6/cj6faeofhfnxsh5iuwazughjlau4igyajnmvjequyelq7apzs4qm.py # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6y/c6yx6oq7oo2cwoaop3iwu5iqfdckg6lycdtu4jjuiv3wdcf2o6p7.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_3 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wk/cwku3n3hlgzo3mrkiiqa7uewr5otcagdctuimd3p6okzg3mywawb.py # Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_4 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/po/cpods5znsd2caqlcgpraxuuxzfbc42kpwgvbpe3n267wr56bv7rm.py # Topologically Sorted Source Nodes: [conv2d_4, x_6, x_7], Original ATen: [aten.convolution, aten.relu, aten.mean, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_6 => relu_4 # x_7 => mean # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_4, [-1, -2], True), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_per_fused_convolution_mean_relu_threshold_backward_5 = async_compile.triton('triton_per_fused_convolution_mean_relu_threshold_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_mean_relu_threshold_backward_5', 
'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_convolution_mean_relu_threshold_backward_5(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 128 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + (r2 + (256*x3)), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 0.0 tmp9 = tmp4 <= tmp8 tmp10 = 256.0 tmp11 = tmp7 / tmp10 tl.store(out_ptr0 + (r2 + (256*x3)), tmp9, None) tl.debug_barrier() tl.store(in_out_ptr0 + (x3), tmp11, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32, ), (1, )) assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (32, ), (1, )) assert_size_stride(primals_12, (10, 32), (32, 1)) assert_size_stride(primals_13, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, 
stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 131072, grid=grid(131072), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 32768, grid=grid(32768), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 32, 16, 16), (8192, 256, 16, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 32768, grid=grid(32768), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 32, 16, 16), (8192, 256, 16, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [conv2d_3, x_5], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf11, primals_9, 32768, grid=grid(32768), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 32, 16, 16), (8192, 256, 16, 1)) buf13 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf16 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.bool) buf14 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [conv2d_4, x_6, x_7], Original ATen: [aten.convolution, aten.relu, aten.mean, aten.threshold_backward] triton_per_fused_convolution_mean_relu_threshold_backward_5.run(buf14, buf12, primals_11, buf16, 128, 256, grid=grid(128), stream=stream0) del buf12 del primals_11 buf15 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, reinterpret_tensor(buf14, (4, 32), (32, 1), 0), reinterpret_tensor(primals_12, (32, 10), (1, 32), 0), alpha=1, beta=1, out=buf15) del primals_13 return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf11, reinterpret_tensor(buf14, (4, 32), (32, 1), 0), primals_12, buf16, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 
1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((10, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torchvision import torchvision.ops from torch import nn class DeformableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False): super(DeformableConv2d, self).__init__() assert type(kernel_size) == tuple or type(kernel_size) == int kernel_size = kernel_size if type(kernel_size) == tuple else ( kernel_size, kernel_size) self.stride = stride if type(stride) == tuple else (stride, stride) self.padding = padding self.offset_conv = nn.Conv2d(in_channels, 2 * kernel_size[0] * kernel_size[1], kernel_size=kernel_size, stride=stride, padding =self.padding, bias=True) nn.init.constant_(self.offset_conv.weight, 0.0) nn.init.constant_(self.offset_conv.bias, 0.0) self.modulator_conv = nn.Conv2d(in_channels, 1 * kernel_size[0] * kernel_size[1], kernel_size=kernel_size, stride=stride, padding =self.padding, bias=True) nn.init.constant_(self.modulator_conv.weight, 0.0) nn.init.constant_(self.modulator_conv.bias, 0.0) self.regular_conv = nn.Conv2d(in_channels=in_channels, out_channels =out_channels, kernel_size=kernel_size, stride=stride, padding= self.padding, bias=bias) def forward(self, x): offset = self.offset_conv(x) modulator = 2.0 * torch.sigmoid(self.modulator_conv(x)) x = torchvision.ops.deform_conv2d(input=x, offset=offset, weight= self.regular_conv.weight, bias=self.regular_conv.bias, padding= self.padding, mask=modulator, stride=self.stride) return x class MNISTClassifier(nn.Module): def __init__(self, deformable=False): super(MNISTClassifier, self).__init__() self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1, bias=True) self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=True) self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=True) conv = nn.Conv2d if deformable is False else DeformableConv2d self.conv4 = conv(32, 32, kernel_size=3, stride=1, padding=1, bias=True ) self.conv5 = conv(32, 32, kernel_size=3, stride=1, padding=1, bias=True ) self.pool = nn.MaxPool2d(2) self.gap = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(32, 10) def forward(self, x): x = torch.relu(self.conv1(x)) x = self.pool(x) x = torch.relu(self.conv2(x)) x = self.pool(x) x = torch.relu(self.conv3(x)) x = torch.relu(self.conv4(x)) x = torch.relu(self.conv5(x)) x = self.gap(x) x = x.flatten(start_dim=1) x = self.fc(x) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
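Because both the offset and modulator convolutions are zero-initialized, a freshly constructed DeformableConv2d samples the regular grid (offset 0) with a unit mask (2 * sigmoid(0) = 1), so it should initially match a plain convolution with the same regular_conv weights. A small sketch of that property, assuming the DeformableConv2d class from this record is in scope:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
dconv = DeformableConv2d(3, 8, kernel_size=3, stride=1, padding=1)
x = torch.rand(2, 3, 16, 16)

# Zero offsets plus a unit modulator make deform_conv2d sample the regular
# grid, so the result reduces to an ordinary convolution at initialization.
plain = F.conv2d(x, dconv.regular_conv.weight, dconv.regular_conv.bias,
                 stride=1, padding=1)
print(torch.allclose(dconv(x), plain, atol=1e-5))  # True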
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torchvision import torchvision.ops from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = 
tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_per_fused_convolution_mean_relu_threshold_backward_5(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + (r2 + 256 * x3), None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 0.0 tmp9 = tmp4 <= tmp8 tmp10 = 256.0 tmp11 = tmp7 / tmp10 tl.store(out_ptr0 + (r2 + 256 * x3), tmp9, None) tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp11, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (10, 32), (32, 1)) assert_size_stride(primals_13, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf1, buf2, buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_5, 131072, XBLOCK=512, num_warps=8, 
num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6, buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 32, 16, 16), (8192, 256, 16, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(32768)](buf9, primals_7, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 32, 16, 16), (8192, 256, 16, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_4[grid(32768)](buf11, primals_9, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf12 = extern_kernels.convolution(buf11, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 32, 16, 16), (8192, 256, 16, 1)) buf13 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf16 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.bool) buf14 = buf13 del buf13 triton_per_fused_convolution_mean_relu_threshold_backward_5[grid(128)]( buf14, buf12, primals_11, buf16, 128, 256, num_warps=2, num_stages=1) del buf12 del primals_11 buf15 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf14, (4, 32), (32, 1), 0), reinterpret_tensor(primals_12, (32, 10), (1, 32), 0), alpha=1, beta=1, out=buf15) del primals_13 return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf11, reinterpret_tensor(buf14, (4, 32), (32, 1), 0), primals_12, buf16) class DeformableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False): super(DeformableConv2d, self).__init__() assert type(kernel_size) == tuple or type(kernel_size) == int kernel_size = kernel_size if type(kernel_size) == tuple else ( kernel_size, kernel_size) self.stride = stride if type(stride) == tuple else (stride, stride) self.padding = padding self.offset_conv = nn.Conv2d(in_channels, 2 * kernel_size[0] * kernel_size[1], kernel_size=kernel_size, stride=stride, padding =self.padding, bias=True) nn.init.constant_(self.offset_conv.weight, 0.0) nn.init.constant_(self.offset_conv.bias, 0.0) self.modulator_conv = nn.Conv2d(in_channels, 1 * kernel_size[0] * kernel_size[1], kernel_size=kernel_size, stride=stride, padding =self.padding, bias=True) nn.init.constant_(self.modulator_conv.weight, 0.0) nn.init.constant_(self.modulator_conv.bias, 0.0) self.regular_conv = nn.Conv2d(in_channels=in_channels, out_channels =out_channels, kernel_size=kernel_size, stride=stride, padding= self.padding, bias=bias) def forward(self, x): offset = self.offset_conv(x) modulator = 2.0 * torch.sigmoid(self.modulator_conv(x)) x = torchvision.ops.deform_conv2d(input=x, offset=offset, weight= self.regular_conv.weight, bias=self.regular_conv.bias, padding= self.padding, mask=modulator, stride=self.stride) return x class MNISTClassifierNew(nn.Module): def __init__(self, deformable=False): super(MNISTClassifierNew, self).__init__() self.conv1 = 
nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1, bias=True) self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=True) self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=True) conv = nn.Conv2d if deformable is False else DeformableConv2d self.conv4 = conv(32, 32, kernel_size=3, stride=1, padding=1, bias=True ) self.conv5 = conv(32, 32, kernel_size=3, stride=1, padding=1, bias=True ) self.pool = nn.MaxPool2d(2) self.gap = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(32, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.conv5.weight primals_11 = self.conv5.bias primals_12 = self.fc.weight primals_13 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
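The persistent-reduction kernel triton_per_fused_convolution_mean_relu_threshold_backward_5 runs one program per (batch, channel) pair (xnumel = 128 = 4 * 32): it adds the conv bias, applies ReLU, writes the act <= 0 mask that the backward pass will reuse, and averages the 16x16 plane (rnumel = 256) for the global average pool. An eager-mode sketch of the same computation, assuming nothing beyond stock PyTorch:

import torch

conv_out = torch.rand(4, 32, 16, 16)          # stands in for buf12
bias = torch.rand(32)                         # stands in for primals_11

act = torch.relu(conv_out + bias.view(1, -1, 1, 1))
mask = act <= 0                               # buf16: reused by autograd
gap = act.mean(dim=(-1, -2), keepdim=True)    # buf14: shape (4, 32, 1, 1)
print(gap.shape, mask.dtype)                  # torch.Size([4, 32, 1, 1]) torch.bool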
developer0hye/PyTorch-Deformable-Convolution-v2
MNISTClassifier
false
15,194
[ "MIT" ]
70
3ed601fa70ee111278b95b134caf29e085642bc2
https://github.com/developer0hye/PyTorch-Deformable-Convolution-v2/tree/3ed601fa70ee111278b95b134caf29e085642bc2
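In triton_poi_fused_max_pool2d_with_indices_1 above, each output index decomposes as x0 = xindex % 32 (output column) and x1 = xindex // 32 (flattened batch*channel*row), so the four loads at offsets 2*x0, 2*x0 + 1, 2*x0 + 64 and 2*x0 + 65 within the 128-element block at 128*x1 are exactly the 2x2 window spanning two 64-wide input rows; the int8 output records which of the four positions won, for the backward pass. An eager sketch of the same windowing, assuming only stock PyTorch:

import torch
import torch.nn.functional as F

x = torch.rand(4, 32, 64, 64)  # matches buf1 in the record above

# The kernel's four loads per output element, as strided views of the input:
w00, w01 = x[..., 0::2, 0::2], x[..., 0::2, 1::2]
w10, w11 = x[..., 1::2, 0::2], x[..., 1::2, 1::2]
window = torch.stack([w00, w01, w10, w11], dim=-1)
vals, idx = window.max(dim=-1)  # idx in 0..3, like the kernel's int8 buffer

print(torch.equal(vals, F.max_pool2d(x, 2)))  # True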
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/h7/ch7dmwygbglo3zs4wpwam24cjagxrnnl2qi3hvh2tiylczttd5l5.py # Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.relu] # Source node to ATen node mapping: # add => add # x => relu # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %convolution_1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) triton_poi_fused_add_relu_0 = async_compile.triton('triton_poi_fused_add_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), 
xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sr/csrtmm7azkys4vr75beqpeookrn5idww5cf5bm6a66biq2ko74cb.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hu/chutdq63hpimahzlzsdep6fhal3pqff3nv4v7gy4qg5nx4zgso5a.py # Topologically Sorted Source Nodes: [conv1d_3, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv1d_3 => convolution_3 # x_2 => relu_2 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 2 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_2, (4, 1, 64), (64, 64, 1)) assert_size_stride(primals_3, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (2, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 64), (256, 64, 1)) # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_2, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 64), (256, 64, 1)) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_add_relu_0.run(buf2, buf1, 1024, grid=grid(1024), stream=stream0) del buf1 # Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 64), (256, 64, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf4, 1024, grid=grid(1024), stream=stream0) # 
Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf5, (4, 2, 64), (128, 64, 1)) buf6 = buf5; del buf5 # reuse buf7 = empty_strided_cuda((4, 2, 64), (128, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv1d_3, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf6, primals_6, buf7, 512, grid=grid(512), stream=stream0) del primals_6 return (buf6, primals_1, primals_2, primals_3, primals_4, primals_5, buf2, buf4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 64), (64, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
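The wrapper above runs the two parallel bias-free 1x1 convolutions through extern kernels and then fuses their sum with the ReLU in a single elementwise pass (triton_poi_fused_add_relu_0, reusing buf0 in place). The eager equivalent of that fused step, assuming only stock PyTorch:

import torch
import torch.nn.functional as F

x = torch.rand(4, 1, 64)                      # matches primals_2
w1 = torch.rand(4, 1, 1)                      # conv1 weight (primals_1)
w2 = torch.rand(4, 1, 1)                      # conv2 weight (primals_3)

# One elementwise pass computes relu(conv1(x) + conv2(x)), i.e. buf2.
fused = torch.relu(F.conv1d(x, w1) + F.conv1d(x, w2))
print(fused.shape)                            # torch.Size([4, 4, 64])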
import torch import torch.nn as nn from torch.autograd import Variable from torch.nn import Parameter class Conv1dExt(nn.Conv1d): def __init__(self, *args, **kwargs): super(Conv1dExt, self).__init__(*args, **kwargs) self.init_ncc() self.input_tied_modules = [] self.output_tied_modules = [] def init_ncc(self): w = self.weight.view(self.weight.size(0), -1) mean = torch.mean(w, dim=1).unsqueeze(1) self.t0_factor = w - mean self.t0_norm = torch.norm(w, p=2, dim=1) self.start_ncc = Variable(torch.zeros(self.out_channels)) self.start_ncc = self.normalized_cross_correlation() def normalized_cross_correlation(self): w = self.weight.view(self.weight.size(0), -1) t_norm = torch.norm(w, p=2, dim=1) if self.in_channels == 1 and sum(self.kernel_size) == 1: ncc = w.squeeze() / torch.norm(self.t0_norm, p=2) ncc = ncc - self.start_ncc return ncc mean = torch.mean(w, dim=1).unsqueeze(1) t_factor = w - mean h_product = self.t0_factor * t_factor cov = torch.sum(h_product, dim=1) denom = self.t0_norm * t_norm ncc = cov / denom ncc = ncc - self.start_ncc return ncc def split_output_channel(self, channel_i): """Split one output channel (a feature) into two, but retain summed value Args: channel_i: (int) number of channel to be split. the ith channel """ self.out_channels += 1 orig_weight = self.weight.data split_pos = 2 * torch.rand(self.in_channels, self.kernel_size[0]) new_weight = torch.zeros(self.out_channels, self.in_channels, self. kernel_size[0]) if channel_i > 0: new_weight[:channel_i, :, :] = orig_weight[:channel_i, :, :] new_weight[channel_i, :, :] = orig_weight[channel_i, :, :] * split_pos new_weight[channel_i + 1, :, :] = orig_weight[channel_i, :, :] * (2 - split_pos) if channel_i + 2 < self.out_channels: new_weight[channel_i + 2, :, :] = orig_weight[channel_i + 1, :, :] if self.bias is not None: orig_bias = self.bias.data new_bias = torch.zeros(self.out_channels) new_bias[:channel_i + 1] = orig_bias[:channel_i + 1] new_bias[channel_i + 1:] = orig_bias[channel_i:] self.bias = Parameter(new_bias) self.weight = Parameter(new_weight) self.init_ncc() def split_input_channel(self, channel_i): if channel_i > self.in_channels: return self.in_channels += 1 orig_weight = self.weight.data dup_slice = orig_weight[:, channel_i, :] * 0.5 new_weight = torch.zeros(self.out_channels, self.in_channels, self. kernel_size[0]) if channel_i > 0: new_weight[:, :channel_i, :] = orig_weight[:, :channel_i, :] new_weight[:, channel_i, :] = dup_slice new_weight[:, channel_i + 1, :] = dup_slice if channel_i + 1 < self.in_channels: new_weight[:, channel_i + 2, :] = orig_weight[:, channel_i + 1, :] self.weight = Parameter(new_weight) self.init_ncc() def split_feature(self, feature_i): """Splits feature in output and input channels Args: feature_i: (int) """ self.split_output_channel(channel_i=feature_i) for dep in self.input_tied_modules: dep.split_input_channel(channel_i=feature_i) for dep in self.output_tied_modules: dep.split_output_channel(channel_i=feature_i) def split_features(self, threshold): """Decides which features to split if they are below a specific threshold Args: threshold: (float) less than 1.
""" ncc = self.normalized_cross_correlation() for i, ncc_val in enumerate(ncc): if ncc_val < threshold: None self.split_feature(i) class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = Conv1dExt(in_channels=1, out_channels=4, kernel_size=1, bias=False) self.conv2 = Conv1dExt(in_channels=1, out_channels=4, kernel_size=1, bias=False) self.conv3 = Conv1dExt(in_channels=4, out_channels=4, kernel_size=1, bias=False) self.conv4 = Conv1dExt(in_channels=4, out_channels=2, kernel_size=1, bias=True) self.conv1.input_tied_modules = [self.conv3] self.conv1.output_tied_modules = [self.conv2] self.conv2.input_tied_modules = [self.conv3] self.conv2.output_tied_modules = [self.conv1] self.conv3.input_tied_modules = [self.conv4] def forward(self, x): x1 = self.conv1(x) x2 = self.conv2(x) x = nn.functional.relu(x1 + x2) x = nn.functional.relu(self.conv3(x)) x = nn.functional.relu(self.conv4(x)) return x def get_inputs(): return [torch.rand([4, 1, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.autograd import Variable from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 2 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_2, (4, 1, 64), (64, 64, 1)) assert_size_stride(primals_3, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (2, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 64), (256, 64, 1)) buf1 = extern_kernels.convolution(primals_2, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 64), (256, 64, 1)) buf2 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_relu_0[grid(1024)](buf2, buf1, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del buf1 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 64), (256, 64, 1)) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(1024)](buf4, 1024, XBLOCK=128, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf5, (4, 2, 64), (128, 64, 1)) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 2, 64), (128, 64, 1), torch.bool) 
        triton_poi_fused_convolution_relu_threshold_backward_2[grid(512)](buf6,
            primals_6, buf7, 512, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_6
    return (buf6, primals_1, primals_2, primals_3, primals_4, primals_5,
        buf2, buf4, buf7)


class Conv1dExt(nn.Conv1d):

    def __init__(self, *args, **kwargs):
        super(Conv1dExt, self).__init__(*args, **kwargs)
        self.init_ncc()
        self.input_tied_modules = []
        self.output_tied_modules = []

    def init_ncc(self):
        w = self.weight.view(self.weight.size(0), -1)
        mean = torch.mean(w, dim=1).unsqueeze(1)
        self.t0_factor = w - mean
        self.t0_norm = torch.norm(w, p=2, dim=1)
        self.start_ncc = Variable(torch.zeros(self.out_channels))
        self.start_ncc = self.normalized_cross_correlation()

    def normalized_cross_correlation(self):
        w = self.weight.view(self.weight.size(0), -1)
        t_norm = torch.norm(w, p=2, dim=1)
        if self.in_channels == 1 and sum(self.kernel_size) == 1:
            ncc = w.squeeze() / torch.norm(self.t0_norm, p=2)
            ncc = ncc - self.start_ncc
            return ncc
        mean = torch.mean(w, dim=1).unsqueeze(1)
        t_factor = w - mean
        h_product = self.t0_factor * t_factor
        cov = torch.sum(h_product, dim=1)
        denom = self.t0_norm * t_norm
        ncc = cov / denom
        ncc = ncc - self.start_ncc
        return ncc

    def split_output_channel(self, channel_i):
        """Split one output channel (a feature) into two, but retain summed value

        Args:
            channel_i: (int) index of the output channel to be split
        """
        self.out_channels += 1
        orig_weight = self.weight.data
        split_pos = 2 * torch.rand(self.in_channels, self.kernel_size[0])
        new_weight = torch.zeros(self.out_channels, self.in_channels,
            self.kernel_size[0])
        if channel_i > 0:
            new_weight[:channel_i, :, :] = orig_weight[:channel_i, :, :]
        new_weight[channel_i, :, :] = orig_weight[channel_i, :, :] * split_pos
        new_weight[channel_i + 1, :, :] = orig_weight[channel_i, :, :] * (2 -
            split_pos)
        if channel_i + 2 < self.out_channels:
            new_weight[channel_i + 2, :, :] = orig_weight[channel_i + 1, :, :]
        if self.bias is not None:
            orig_bias = self.bias.data
            new_bias = torch.zeros(self.out_channels)
            new_bias[:channel_i + 1] = orig_bias[:channel_i + 1]
            new_bias[channel_i + 1:] = orig_bias[channel_i:]
            self.bias = Parameter(new_bias)
        self.weight = Parameter(new_weight)
        self.init_ncc()

    def split_input_channel(self, channel_i):
        if channel_i > self.in_channels:
            # channel index out of range: nothing to split
            return
        self.in_channels += 1
        orig_weight = self.weight.data
        dup_slice = orig_weight[:, channel_i, :] * 0.5
        new_weight = torch.zeros(self.out_channels, self.in_channels,
            self.kernel_size[0])
        if channel_i > 0:
            new_weight[:, :channel_i, :] = orig_weight[:, :channel_i, :]
        new_weight[:, channel_i, :] = dup_slice
        new_weight[:, channel_i + 1, :] = dup_slice
        if channel_i + 1 < self.in_channels:
            new_weight[:, channel_i + 2, :] = orig_weight[:, channel_i + 1, :]
        self.weight = Parameter(new_weight)
        self.init_ncc()

    def split_feature(self, feature_i):
        """Splits feature in output and input channels

        Args:
            feature_i: (int)
        """
        self.split_output_channel(channel_i=feature_i)
        for dep in self.input_tied_modules:
            dep.split_input_channel(channel_i=feature_i)
        for dep in self.output_tied_modules:
            dep.split_output_channel(channel_i=feature_i)

    def split_features(self, threshold):
        """Decides which features to split if they are below a specific threshold

        Args:
            threshold: (float) less than 1.
        """
        ncc = self.normalized_cross_correlation()
        for i, ncc_val in enumerate(ncc):
            if ncc_val < threshold:
                self.split_feature(i)


class NetNew(nn.Module):

    def __init__(self):
        super(NetNew, self).__init__()
        self.conv1 = Conv1dExt(in_channels=1, out_channels=4, kernel_size=1,
            bias=False)
        self.conv2 = Conv1dExt(in_channels=1, out_channels=4, kernel_size=1,
            bias=False)
        self.conv3 = Conv1dExt(in_channels=4, out_channels=4, kernel_size=1,
            bias=False)
        self.conv4 = Conv1dExt(in_channels=4, out_channels=2, kernel_size=1,
            bias=True)
        self.conv1.input_tied_modules = [self.conv3]
        self.conv1.output_tied_modules = [self.conv2]
        self.conv2.input_tied_modules = [self.conv3]
        self.conv2.output_tied_modules = [self.conv1]
        self.conv3.input_tied_modules = [self.conv4]

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_3 = self.conv2.weight
        primals_4 = self.conv3.weight
        primals_5 = self.conv4.weight
        primals_6 = self.conv4.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
dhpollack/fast-wavenet.pytorch
Net
false
15,195
[ "MIT" ]
98
853f6ecb1e8d23a5c01fc2455640c6637d30f2f9
https://github.com/dhpollack/fast-wavenet.pytorch/tree/853f6ecb1e8d23a5c01fc2455640c6637d30f2f9
ReduceBranch
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qn/cqnqbqlcpg6bgcau35r5lxgdywlkgto5q42fnp26ywsu7emttdb4.py # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [1, 1], [2, 2]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), 
tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/w6/cw6qvvmrb2bbscjpx3bepkkve3xteyufgtjzrferghxid6245h3w.py # Topologically Sorted Source Nodes: [shift_x_1, avg_pool2d_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d_1 => avg_pool2d_1 # shift_x_1 => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%slice_4, [0, 1, 0, 1], 0.0), kwargs = {}) # %avg_pool2d_1 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%constant_pad_nd, [1, 1], [2, 2]), kwargs = {}) triton_poi_fused_avg_pool2d_constant_pad_nd_1 = async_compile.triton('triton_poi_fused_avg_pool2d_constant_pad_nd_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_constant_pad_nd_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 2) % 2 x0 = xindex % 2 x3 = (xindex // 2) x4 = xindex tmp0 = 2*x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = 2*x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x3)), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = 1.0 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x4), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kf/ckfghrkocduvmovgzgimf5n4tizoinlqpdnmgn4qbkr27p4qmmji.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] # Source node to ATen node mapping: # out => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution, %convolution_1], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 8 x0 = xindex % 4 x2 = (xindex // 32) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [out1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [shift_x_1, avg_pool2d_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d] triton_poi_fused_avg_pool2d_constant_pad_nd_1.run(primals_1, buf2, 64, grid=grid(64), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [out2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 2, 2), (16, 4, 2, 1)) buf4 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf1, buf3, buf4, 128, grid=grid(128), stream=stream0) 
del buf1 del buf3 return (buf4, primals_2, primals_3, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class ReduceBranch(nn.Module): def __init__(self, planes, stride=2): super(ReduceBranch, self).__init__() self.conv1 = nn.Conv2d(planes, planes, kernel_size=1, stride=1, padding=0, bias=False) self.conv2 = nn.Conv2d(planes, planes, kernel_size=1, stride=1, padding=0, bias=False) self.avg_pool = nn.AvgPool2d(kernel_size=1, stride=stride, padding=0) def forward(self, x): out1 = self.conv1(self.avg_pool(x)) shift_x = x[:, :, 1:, 1:] shift_x = F.pad(shift_x, (0, 1, 0, 1)) out2 = self.conv2(self.avg_pool(shift_x)) out = torch.cat([out1, out2], dim=1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'planes': 4}]
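A minimal shape check for ReduceBranch (illustrative, using the sizes from get_inputs()/get_init_inputs()): with the default stride=2 each branch halves both spatial dimensions, and the channel-wise cat doubles the planes.

import torch

branch = ReduceBranch(planes=4)  # stride defaults to 2
out = branch(torch.rand(4, 4, 4, 4))
print(out.shape)  # torch.Size([4, 8, 2, 2])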
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x3 = xindex // 2 x4 = xindex tmp0 = 2 * x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = 2 * x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x3), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = 1.0 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x4, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(64)](primals_1, buf0, 64, XBLOCK =64, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_avg_pool2d_constant_pad_nd_1[grid(64)](primals_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 2, 2), (16, 4, 2, 1)) buf4 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32) triton_poi_fused_cat_2[grid(128)](buf1, buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf3 return buf4, primals_2, primals_3, buf0, buf2 class ReduceBranchNew(nn.Module): def __init__(self, planes, stride=2): super(ReduceBranchNew, self).__init__() self.conv1 = nn.Conv2d(planes, planes, kernel_size=1, stride=1, 
padding=0, bias=False) self.conv2 = nn.Conv2d(planes, planes, kernel_size=1, stride=1, padding=0, bias=False) self.avg_pool = nn.AvgPool2d(kernel_size=1, stride=stride, padding=0) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
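A side note on triton_poi_fused_avg_pool2d_0 (a hedged reading, not repo documentation): a 1x1 average pool with stride 2 is just subsampling every other pixel, which is why the kernel only gathers in_ptr0 + (2 * x0 + 8 * x1) and multiplies by 1.0. The identity in eager PyTorch:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
# averaging a single element is that element, so the pool reduces to a strided copy
assert torch.equal(F.avg_pool2d(x, kernel_size=1, stride=2), x[:, :, ::2, ::2])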
dnddnjs/pytorch-vision
ReduceBranch
false
15,196
[ "MIT" ]
48
d432b467774f838bef37372d6cff3576c6559803
https://github.com/dnddnjs/pytorch-vision/tree/d432b467774f838bef37372d6cff3576c6559803
InstanceNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bs/cbsesmufvlsvem6vcuredxexiwyhk2epsjxb3ein6p73w3uyowwr.py # Topologically Sorted Source Nodes: [mean, centered_x, pow_1, mean_1, add, std, mul_1, output], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.rsqrt, aten.mul] # Source node to ATen node mapping: # add => add # centered_x => sub # mean => mean # mean_1 => mean_1 # mul_1 => mul_1 # output => add_1 # pow_1 => pow_1 # std => rsqrt # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [2], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [2], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {}) triton_per_fused_add_mean_mul_pow_rsqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_pow_rsqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_rsqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_pow_rsqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp18 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tmp7 = tmp0 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tmp12 / tmp5 tmp14 = 1e-05 tmp15 = tmp13 + tmp14 tmp16 = libdevice.rsqrt(tmp15) tmp17 = tmp7 * tmp16 tmp19 = tmp17 * tmp18 tmp21 = tmp19 + tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp16, xmask) tl.store(out_ptr0 + (r1 + (16*x0)), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0); del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, centered_x, pow_1, mean_1, add, std, mul_1, output], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.rsqrt, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_mean_mul_pow_rsqrt_sub_0.run(buf1, buf3, primals_1, primals_2, primals_3, buf4, 16, 16, grid=grid(16), stream=stream0) del primals_2 del primals_3 return (buf4, primals_1, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: 
call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn from torch.nn.parameter import Parameter class InstanceNorm(nn.Module): def __init__(self, num_features, affine=True, eps=1e-05): """`num_features` number of feature channels """ super(InstanceNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps self.scale = Parameter(torch.Tensor(num_features)) self.shift = Parameter(torch.Tensor(num_features)) self.reset_parameters() def reset_parameters(self): if self.affine: self.scale.data.normal_(mean=0.0, std=0.02) self.shift.data.zero_() def forward(self, input): size = input.size() x_reshaped = input.view(size[0], size[1], size[2] * size[3]) mean = x_reshaped.mean(2, keepdim=True) centered_x = x_reshaped - mean std = torch.rsqrt((centered_x ** 2).mean(2, keepdim=True) + self.eps) norm_features = (centered_x * std).view(*size) if self.affine: output = norm_features * self.scale[:, None, None] + self.shift[ :, None, None] else: output = norm_features return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features': 4}]
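A hedged equivalence check (an assumption based on the math above, not shipped with the repo): the hand-rolled normalization should agree with torch.nn.functional.instance_norm, since both subtract the per-(sample, channel) mean and divide by the square root of the biased spatial variance plus eps.

import torch
import torch.nn.functional as F

m = InstanceNorm(num_features=4)
x = torch.rand(4, 4, 4, 4)
ref = F.instance_norm(x, weight=m.scale, bias=m.shift, eps=m.eps)
assert torch.allclose(m(x), ref, atol=1e-6)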
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_mul_pow_rsqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp18 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tmp7 = tmp0 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tmp12 / tmp5 tmp14 = 1e-05 tmp15 = tmp13 + tmp14 tmp16 = libdevice.rsqrt(tmp15) tmp17 = tmp7 * tmp16 tmp19 = tmp17 * tmp18 tmp21 = tmp19 + tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp16, xmask) tl.store(out_ptr0 + (r1 + 16 * x0), tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_mean_mul_pow_rsqrt_sub_0[grid(16)](buf1, buf3, primals_1, primals_2, primals_3, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_2 del primals_3 return buf4, primals_1, buf1, buf3 class InstanceNormNew(nn.Module): def __init__(self, num_features, affine=True, eps=1e-05): """`num_features` number of feature channels """ super(InstanceNormNew, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps self.scale = Parameter(torch.Tensor(num_features)) self.shift = Parameter(torch.Tensor(num_features)) self.reset_parameters() def reset_parameters(self): if self.affine: self.scale.data.normal_(mean=0.0, std=0.02) self.shift.data.zero_() def forward(self, input_0): primals_2 = self.scale primals_3 = self.shift primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
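For orientation (a hedged reading of the kernel above): each program instance reduces one (sample, channel) row of rnumel=16 spatial elements, writing the row mean into buf1 and rsqrt(var + eps) into buf3; call() returns both because the backward pass reuses them. The same quantities in eager form:

import torch

x = torch.rand(4, 4, 4, 4).view(4, 4, 16)
mean = x.mean(2, keepdim=True)                                          # buf1 (tmp6)
inv_std = torch.rsqrt(((x - mean) ** 2).mean(2, keepdim=True) + 1e-05)  # buf3 (tmp16)
normed = (x - mean) * inv_std                                           # tmp17, pre-affine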
doantientai/augmented_cyclegan
InstanceNorm
false
15,197
[ "MIT" ]
133
821274577e71c412198356ad6302c982554d558c
https://github.com/doantientai/augmented_cyclegan/tree/821274577e71c412198356ad6302c982554d558c
Actor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 
4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qi/cqi77u6ojxdkp4u2ifgsif627yufkzndt2xkn37vrg7afff5ruhh.py # Topologically Sorted Source Nodes: [log_std_1, std], Original ATen: [aten.clamp, aten.exp, aten.ge, aten.le, aten.logical_and] # Source node to ATen node mapping: # log_std_1 => clamp_max, clamp_min # std => exp # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_7, -20), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 2), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%clamp_max,), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_7, -20), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_7, 2), kwargs = {}) # %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {}) triton_poi_fused_clamp_exp_ge_le_logical_and_1 = async_compile.triton('triton_poi_fused_clamp_exp_ge_le_logical_and_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_exp_ge_le_logical_and_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_exp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -20.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 2.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 >= tmp3 tmp9 = tmp2 <= tmp5 tmp10 = tmp8 & tmp9 tl.store(out_ptr0 + (x2), tmp7, xmask) 
tl.store(out_ptr1 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf8, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [log_std_1, std], Original ATen: [aten.clamp, aten.exp, aten.ge, aten.le, aten.logical_and] triton_poi_fused_clamp_exp_ge_le_logical_and_1.run(buf5, primals_9, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del primals_9 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf6, buf7, primals_8, primals_6, buf8, primals_4, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), 
(64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class Actor(nn.Module): def __init__(self, state_size, action_size, args, log_std_min=-20, log_std_max=2): super(Actor, self).__init__() self.log_std_min = log_std_min self.log_std_max = log_std_max self.fc1 = nn.Linear(state_size, args.hidden_size) self.fc2 = nn.Linear(args.hidden_size, args.hidden_size) self.fc3 = nn.Linear(args.hidden_size, action_size) self.fc4 = nn.Linear(args.hidden_size, action_size) def forward(self, x): x = torch.relu(self.fc1(x)) x = torch.relu(self.fc2(x)) mu = self.fc3(x) log_std = self.fc4(x) log_std = torch.clamp(log_std, min=self.log_std_min, max=self. log_std_max) std = torch.exp(log_std) return mu, std def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'args': _mock_config( hidden_size=4)}]
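A quick sanity check for Actor (illustrative; _mock_config only needs to carry hidden_size): since log_std is clamped to [log_std_min, log_std_max] before exponentiation, std is bounded by [e^-20, e^2].

from _paritybench_helpers import _mock_config
import torch

actor = Actor(state_size=4, action_size=4, args=_mock_config(hidden_size=4))
mu, std = actor(torch.rand(4, 4, 4, 4))
print(mu.shape, std.shape)  # both torch.Size([4, 4, 4, 4])
assert std.min() >= torch.exp(torch.tensor(-20.0))  # std = exp(clamped log_std)
assert std.max() <= torch.exp(torch.tensor(2.0))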
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_clamp_exp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -20.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 2.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 >= tmp3 tmp9 = tmp2 <= tmp5 tmp10 = tmp8 & tmp9 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 
4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_clamp_exp_ge_le_logical_and_1[grid(256)](buf5, primals_9, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del primals_9 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0 ), buf6, buf7, primals_8, primals_6, buf8, primals_4, buf9 class ActorNew(nn.Module): def __init__(self, state_size, action_size, args, log_std_min=-20, log_std_max=2): super(ActorNew, self).__init__() self.log_std_min = log_std_min self.log_std_max = log_std_max self.fc1 = nn.Linear(state_size, args.hidden_size) self.fc2 = nn.Linear(args.hidden_size, args.hidden_size) self.fc3 = nn.Linear(args.hidden_size, action_size) self.fc4 = nn.Linear(args.hidden_size, action_size) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
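A side note on buf7 (a hedged reading): the ge/le/logical_and values are exactly the mask that clamp's backward needs, so gradients flow only where log_std_min <= x <= log_std_max. Checking the same behavior in eager mode:

import torch

x = (torch.randn(64) * 5).requires_grad_()  # spread values past the clamp bounds
y = torch.clamp(x, min=-20, max=2)
y.sum().backward()
assert torch.equal(x.grad, ((x >= -20) & (x <= 2)).float())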
dongminlee94/Samsung-DRL-Code
Actor
false
15,198
[ "MIT" ]
116
c96f8739a09cfd708c265954ee8ecf0ea3b67395
https://github.com/dongminlee94/Samsung-DRL-Code/tree/c96f8739a09cfd708c265954ee8ecf0ea3b67395
MultiHeadedAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3u/c3ui6pflkqqmwiicu3k3k6nxfn3zxrzgar4nyb7sxfkreg6ab7we.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 0.5), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float("-inf")) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + (16*x0)), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2) del primals_2 buf3 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 16), (64, 1, 4), 0), out=buf3) buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_per_fused__softmax_0.run(buf3, buf6, 64, 16, grid=grid(64), stream=stream0) del buf3 buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attended], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf8) 
return (reinterpret_tensor(buf8, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf6, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), primals_5, reinterpret_tensor(buf0, (4, 4, 16), (64, 1, 4), 0), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0), reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch.nn import functional as F def same_tensor(tensor, *args): """ Do the input tensors all point to the same underlying data """ for other in args: if not torch.is_tensor(other): return False if tensor.device != other.device: return False if tensor.dtype != other.dtype: return False if tensor.data_ptr() != other.data_ptr(): return False return True class MultiHeadedAttention(nn.Module): """ Implement a multi-headed attention module """ def __init__(self, embed_dim, num_heads=1): """ Initialize the attention module """ super(MultiHeadedAttention, self).__init__() assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}' self.embed_dim = embed_dim self.num_heads = num_heads self.projection_dim = embed_dim // num_heads self.scale = self.projection_dim ** -0.5 self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim, embed_dim)) self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False) self.reset_parameters() def reset_parameters(self): """ Reset parameters using xavier initialization """ gain = nn.init.calculate_gain('linear') nn.init.xavier_uniform_(self.input_weights, gain) nn.init.xavier_uniform_(self.output_projection.weight, gain) def project(self, inputs, index=0, chunks=1): """ Produce a linear projection using the weights """ batch_size = inputs.shape[0] start = index * self.embed_dim end = start + chunks * self.embed_dim projections = F.linear(inputs, self.input_weights[start:end]).chunk( chunks, dim=-1) output_projections = [] for projection in projections: output_projections.append(projection.view(batch_size, -1, self. num_heads, self.projection_dim).transpose(2, 1).contiguous( ).view(batch_size * self.num_heads, -1, self.projection_dim)) return output_projections def attention(self, values, keys, queries, key_mask=None, mask=None): """ Scaled dot product attention with optional masks """ logits = self.scale * torch.bmm(queries, keys.transpose(2, 1)) if mask is not None: logits += mask if key_mask is not None: logits_shape = logits.shape batch_size = logits_shape[0] // self.num_heads logits = logits.view(batch_size, self.num_heads, logits_shape[1 ], logits_shape[2]) logits.masked_fill_(key_mask[:, None, None], float('-inf')) logits = logits.view(logits_shape) attended = torch.bmm(F.softmax(logits, dim=-1), values) batch_size = queries.shape[0] // self.num_heads return attended.view(batch_size, self.num_heads, -1, self. projection_dim).transpose(2, 1).contiguous().view(batch_size, - 1, self.num_heads * self.projection_dim) def forward(self, values, keys, queries, key_mask=None, attention_mask= None, num_queries=0): """ Forward pass of the attention """ if same_tensor(values, keys, queries): values, keys, queries = self.project(values, chunks=3) elif same_tensor(values, keys): values, keys = self.project(values, chunks=2) queries, = self.project(queries, 2) else: values, = self.project(values, 0) keys, = self.project(keys, 1) queries, = self.project(queries, 2) if num_queries: queries = queries[:, -num_queries:] attended = self.attention(values, keys, queries, key_mask, attention_mask) return self.output_projection(attended) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'embed_dim': 4}]
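A minimal sketch (not part of the original record; shapes follow get_inputs above) of why the compiled call() can replace the single F.linear in project(chunks=3) with three plain mms: chunking the output of F.linear against the stacked 12x4 input_weights is the same as multiplying by row slices 0:4, 4:8 and 8:12 of that weight, which is exactly what the reinterpret_tensor offsets 0, 16 and 32 (element offsets, i.e. rows 0, 4 and 8 of a (12, 4) tensor) select in the generated wrapper.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
w = torch.randn(12, 4)   # stands in for input_weights (3 * embed_dim, embed_dim)
x = torch.randn(64, 4)   # the (4, 4, 4, 4) input flattened to (64, 4)

# eager path: one projection, chunked into values/keys/queries
v, k, q = F.linear(x, w).chunk(3, dim=-1)

# compiled path: three separate mms against row slices of the same weight
assert torch.allclose(v, x @ w[0:4].t())
assert torch.allclose(k, x @ w[4:8].t())
assert torch.allclose(q, x @ w[8:12].t())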
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2) del primals_2 buf3 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 16), (64, 1, 4), 0), out=buf3) buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(64)](buf3, buf6, 64, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf3 buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf8) return reinterpret_tensor(buf8, (4, 16, 4), (64, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf7, (64, 4), (4, 1), 0 ), primals_5, reinterpret_tensor(buf0, (4, 4, 16), (64, 1, 4), 0 ), reinterpret_tensor(buf2, (4, 4, 16), (64, 
1, 4), 0 ), reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0) def same_tensor(tensor, *args): """ Do the input tensors all point to the same underlying data """ for other in args: if not torch.is_tensor(other): return False if tensor.device != other.device: return False if tensor.dtype != other.dtype: return False if tensor.data_ptr() != other.data_ptr(): return False return True class MultiHeadedAttentionNew(nn.Module): """ Implement a multi-headed attention module """ def __init__(self, embed_dim, num_heads=1): """ Initialize the attention module """ super(MultiHeadedAttentionNew, self).__init__() assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}' self.embed_dim = embed_dim self.num_heads = num_heads self.projection_dim = embed_dim // num_heads self.scale = self.projection_dim ** -0.5 self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim, embed_dim)) self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False) self.reset_parameters() def reset_parameters(self): """ Reset parameters using xavier initialization """ gain = nn.init.calculate_gain('linear') nn.init.xavier_uniform_(self.input_weights, gain) nn.init.xavier_uniform_(self.output_projection.weight, gain) def project(self, inputs, index=0, chunks=1): """ Produce a linear projection using the weights """ batch_size = inputs.shape[0] start = index * self.embed_dim end = start + chunks * self.embed_dim projections = F.linear(inputs, self.input_weights[start:end]).chunk( chunks, dim=-1) output_projections = [] for projection in projections: output_projections.append(projection.view(batch_size, -1, self. num_heads, self.projection_dim).transpose(2, 1).contiguous( ).view(batch_size * self.num_heads, -1, self.projection_dim)) return output_projections def attention(self, values, keys, queries, key_mask=None, mask=None): """ Scaled dot product attention with optional masks """ logits = self.scale * torch.bmm(queries, keys.transpose(2, 1)) if mask is not None: logits += mask if key_mask is not None: logits_shape = logits.shape batch_size = logits_shape[0] // self.num_heads logits = logits.view(batch_size, self.num_heads, logits_shape[1 ], logits_shape[2]) logits.masked_fill_(key_mask[:, None, None], float('-inf')) logits = logits.view(logits_shape) attended = torch.bmm(F.softmax(logits, dim=-1), values) batch_size = queries.shape[0] // self.num_heads return attended.view(batch_size, self.num_heads, -1, self. projection_dim).transpose(2, 1).contiguous().view(batch_size, - 1, self.num_heads * self.projection_dim) def forward(self, input_0, input_1, input_2): primals_2 = self.input_weights primals_5 = self.output_projection.weight primals_1 = input_0 primals_3 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
dojoteef/synst
MultiHeadedAttention
false
15199
[ "BSD-3-Clause" ]
81
a1842682cf757e8a501cd9cee16f20e1a14158f1
https://github.com/dojoteef/synst/tree/a1842682cf757e8a501cd9cee16f20e1a14158f1
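One detail of the fused softmax kernel in this record is worth spelling out: Inductor folds the attention scale (projection_dim ** -0.5 = 0.5 for embed_dim=4, num_heads=1) into the exponent, computing exp(0.5 * (logits - max(logits))) row by row and then normalizing. A short sanity check against plain PyTorch (the tensors here are illustrative, not taken from the record):

import torch

torch.manual_seed(0)
logits = torch.randn(4, 16, 16)   # bmm(queries, keys^T) before scaling
scale = 0.5                       # projection_dim ** -0.5 with projection_dim = 4

# eager: softmax of the scaled logits
reference = torch.softmax(scale * logits, dim=-1)

# fused kernel: subtract the row max first, then apply the scale inside exp;
# max(scale * x) == scale * max(x) for scale > 0, so the two agree
shifted = scale * (logits - logits.amax(dim=-1, keepdim=True))
fused = shifted.exp() / shifted.exp().sum(dim=-1, keepdim=True)

assert torch.allclose(reference, fused, atol=1e-6)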
GeneralizedMeanPooling
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xs/cxsstbq3wbw7zaisbdiefnedbcsbrsh6zg7qkoguxoj7qqghnei2.py # Topologically Sorted Source Nodes: [clamp, x, adaptive_avg_pool2d, pow_2], Original ATen: [aten.clamp, aten.pow, aten.mean] # Source node to ATen node mapping: # adaptive_avg_pool2d => mean # clamp => clamp_min # pow_2 => pow_2 # x => pow_1 # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 1e-06), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 4.0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1, -2], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mean, 0.25), kwargs = {}) triton_per_fused_clamp_mean_pow_0 = async_compile.triton('triton_per_fused_clamp_mean_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_mean_pow_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_clamp_mean_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = 1e-06 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = tmp2 * tmp2 tmp4 = tmp3 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.sum(tmp7, 1)[:, None] tmp9 = 16.0 tmp10 = tmp8 / tmp9 tmp11 = 0.25 tmp12 = libdevice.pow(tmp10, tmp11) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [clamp, x, adaptive_avg_pool2d, pow_2], Original ATen: [aten.clamp, aten.pow, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_clamp_mean_pow_0.run(buf1, arg0_1, 16, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch import torch.nn.functional as F from torch.nn.modules import Module class GeneralizedMeanPooling(Module): """Applies a 2D power-average adaptive pooling over an input signal composed of several input planes. The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)` - At p = infinity, one gets Max Pooling - At p = 1, one gets Average Pooling The output is of size H x W, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size of the image of the form H x W. Can be a tuple (H, W) or a single H for a square image H x H H and W can be either a ``int``, or ``None`` which means the size will be the same as that of the input. """ def __init__(self, norm, output_size=1, eps=1e-06): super(GeneralizedMeanPooling, self).__init__() assert norm > 0 self.p = float(norm) self.output_size = output_size self.eps = eps def forward(self, x): x = x.clamp(min=self.eps).pow(self.p) return F.adaptive_avg_pool2d(x, self.output_size).pow(1.0 / self.p) def __repr__(self): return self.__class__.__name__ + '(' + str(self.p ) + ', ' + 'output_size=' + str(self.output_size) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'norm': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch.nn.modules import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_clamp_mean_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 1e-06 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = tmp2 * tmp2 tmp4 = tmp3 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.sum(tmp7, 1)[:, None] tmp9 = 16.0 tmp10 = tmp8 / tmp9 tmp11 = 0.25 tmp12 = libdevice.pow(tmp10, tmp11) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp12, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_clamp_mean_pow_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class GeneralizedMeanPoolingNew(Module): """Applies a 2D power-average adaptive pooling over an input signal composed of several input planes. The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)` - At p = infinity, one gets Max Pooling - At p = 1, one gets Average Pooling The output is of size H x W, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size of the image of the form H x W. Can be a tuple (H, W) or a single H for a square image H x H H and W can be either a ``int``, or ``None`` which means the size will be the same as that of the input. """ def __init__(self, norm, output_size=1, eps=1e-06): super(GeneralizedMeanPoolingNew, self).__init__() assert norm > 0 self.p = float(norm) self.output_size = output_size self.eps = eps def __repr__(self): return self.__class__.__name__ + '(' + str(self.p ) + ', ' + 'output_size=' + str(self.output_size) + ')' def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dongan-beta/deep-image-retrieval
GeneralizedMeanPooling
false
15200
[ "BSD-3-Clause" ]
253
3e0885f88da328aefb7abb2fa350f8860a4bd52d
https://github.com/dongan-beta/deep-image-retrieval/tree/3e0885f88da328aefb7abb2fa350f8860a4bd52d
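Two things in this record are easy to miss. First, despite the pow(sum(...), 1/p) wording in the docstring, the code (and the fused kernel, which divides the reduction by 16.0 = H * W before libdevice.pow(., 0.25)) computes a power mean, f(X) = (mean(X ** p)) ** (1/p); the kernel also evaluates x ** 4 as two squarings rather than a pow call. Second, the limiting cases claimed in the docstring are directly checkable. A small sketch (gem below is my restatement of the eager forward, not code from the record):

import torch
import torch.nn.functional as F

def gem(x, p, eps=1e-06):
    # generalized-mean pooling, as in GeneralizedMeanPooling.forward
    return F.adaptive_avg_pool2d(x.clamp(min=eps).pow(p), 1).pow(1.0 / p)

torch.manual_seed(0)
x = torch.rand(4, 4, 4, 4)

# p = 1 is exactly average pooling
assert torch.allclose(gem(x, 1.0), F.adaptive_avg_pool2d(x, 1), atol=1e-6)

# large p approaches max pooling, within max(x) * (1 - (H*W) ** (-1/p)) of it
assert torch.allclose(gem(x, 256.0), F.adaptive_max_pool2d(x, 1), atol=0.02)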
TripletLogExpLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/a2/ca2u3chgeorw6tm3hldz3fjzmdh6mfkgfvuaeu4xcp3hxjcv6bwn.py # Topologically Sorted Source Nodes: [d_p, d_n, sub, exp, add, dist, loss], Original ATen: [aten.sub, aten.add, aten.norm, aten.exp, aten.log, aten.mean] # Source node to ATen node mapping: # add => add_2 # d_n => add_1, pow_3, pow_4, sub_1, sum_2 # d_p => add, pow_1, pow_2, sub, sum_1 # dist => log # exp => exp # loss => mean # sub => sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%sub, 1e-06), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg2_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%sub_1, 1e-06), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 2.0), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1]), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %pow_4), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_2,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%log,), kwargs = {}) triton_per_fused_add_exp_log_mean_norm_sub_0 = async_compile.triton('triton_per_fused_add_exp_log_mean_norm_sub_0', ''' import 
triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_log_mean_norm_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_exp_log_mean_norm_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp26 = tmp0 - tmp25 tmp27 = tmp26 + tmp3 tmp28 = tmp27 * tmp27 tmp30 = tmp6 - tmp29 tmp31 = tmp30 + tmp3 tmp32 = tmp31 * tmp31 tmp33 = tmp28 + tmp32 tmp35 = tmp12 - tmp34 tmp36 = tmp35 + tmp3 tmp37 = tmp36 * tmp36 tmp38 = tmp33 + tmp37 tmp40 = tmp18 - tmp39 tmp41 = tmp40 + tmp3 tmp42 = tmp41 * tmp41 tmp43 = tmp38 + tmp42 tmp44 = libdevice.sqrt(tmp43) tmp45 = tmp24 - tmp44 tmp46 = tl_math.exp(tmp45) 
tmp47 = 1.0 tmp48 = tmp46 + tmp47 tmp49 = tl_math.log(tmp48) tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK]) tmp52 = tl.sum(tmp50, 1)[:, None] tmp53 = 4.0 tmp54 = tmp52 / tmp53 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp54, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [d_p, d_n, sub, exp, add, dist, loss], Original ATen: [aten.sub, aten.add, aten.norm, aten.exp, aten.log, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_exp_log_mean_norm_sub_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 4, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn class TripletLogExpLoss(nn.Module): """Creates a criterion that measures the triplet loss given an input tensors x1, x2, x3. This is used for measuring a relative similarity between samples. A triplet is composed by `a`, `p` and `n`: anchor, positive examples and negative example respectively. The shape of all input variables should be :math:`(N, D)`. The distance is described in detail in the paper `Improving Pairwise Ranking for Multi-Label Image Classification`_ by Y. Li et al. .. math:: L(a, p, n) = log \\left( 1 + exp(d(a_i, p_i) - d(a_i, n_i) \\right) Args: anchor: anchor input tensor positive: positive input tensor negative: negative input tensor Shape: - Input: :math:`(N, D)` where `D = vector dimension` - Output: :math:`(N, 1)` >>> triplet_loss = nn.TripletLogExpLoss(p=2) >>> input1 = autograd.Variable(torch.randn(100, 128)) >>> input2 = autograd.Variable(torch.randn(100, 128)) >>> input3 = autograd.Variable(torch.randn(100, 128)) >>> output = triplet_loss(input1, input2, input3) >>> output.backward() .. _Learning shallow convolutional feature descriptors with triplet losses: http://www.iis.ee.ic.ac.uk/%7Evbalnt/shallow_descr/TFeat_paper.pdf """ def __init__(self, p=2, eps=1e-06, swap=False): super(TripletLogExpLoss, self).__init__() self.p = p self.eps = eps self.swap = swap def forward(self, anchor, positive, negative): assert anchor.size() == positive.size( ), 'Input sizes between positive and negative must be equal.' assert anchor.size() == negative.size( ), 'Input sizes between anchor and negative must be equal.' assert positive.size() == negative.size( ), 'Input sizes between positive and negative must be equal.' assert anchor.dim() == 2, 'Input must be a 2D matrix.' d_p = F.pairwise_distance(anchor, positive, self.p, self.eps) d_n = F.pairwise_distance(anchor, negative, self.p, self.eps) if self.swap: d_s = F.pairwise_distance(positive, negative, self.p, self.eps) d_n = torch.min(d_n, d_s) dist = torch.log(1 + torch.exp(d_p - d_n)) loss = torch.mean(dist) return loss def eval_func(self, dp, dn): return np.log(1 + np.exp(dp - dn)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_exp_log_mean_norm_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp26 = tmp0 - tmp25 tmp27 = tmp26 + tmp3 tmp28 = tmp27 * tmp27 tmp30 = tmp6 - tmp29 tmp31 = tmp30 + tmp3 tmp32 = tmp31 * tmp31 tmp33 = tmp28 + tmp32 tmp35 = tmp12 - tmp34 tmp36 = tmp35 + tmp3 tmp37 = tmp36 * tmp36 tmp38 = tmp33 + tmp37 tmp40 = tmp18 - tmp39 tmp41 = tmp40 + tmp3 tmp42 = tmp41 * tmp41 tmp43 = tmp38 + tmp42 tmp44 = libdevice.sqrt(tmp43) tmp45 = tmp24 - tmp44 tmp46 = tl_math.exp(tmp45) tmp47 = 1.0 tmp48 = tmp46 + tmp47 tmp49 = tl_math.log(tmp48) tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK]) tmp52 = tl.sum(tmp50, 1)[:, None] tmp53 = 4.0 tmp54 = tmp52 / tmp53 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp54, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_exp_log_mean_norm_sub_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class TripletLogExpLossNew(nn.Module): """Creates a criterion that measures the triplet loss given an input tensors x1, x2, x3. This is used for measuring a relative similarity between samples. A triplet is composed by `a`, `p` and `n`: anchor, positive examples and negative example respectively. 
The shape of all input variables should be :math:`(N, D)`. The distance is described in detail in the paper `Improving Pairwise Ranking for Multi-Label Image Classification`_ by Y. Li et al. .. math:: L(a, p, n) = log \\left( 1 + exp(d(a_i, p_i) - d(a_i, n_i) \\right) Args: anchor: anchor input tensor positive: positive input tensor negative: negative input tensor Shape: - Input: :math:`(N, D)` where `D = vector dimension` - Output: :math:`(N, 1)` >>> triplet_loss = nn.TripletLogExpLoss(p=2) >>> input1 = autograd.Variable(torch.randn(100, 128)) >>> input2 = autograd.Variable(torch.randn(100, 128)) >>> input3 = autograd.Variable(torch.randn(100, 128)) >>> output = triplet_loss(input1, input2, input3) >>> output.backward() .. _Learning shallow convolutional feature descriptors with triplet losses: http://www.iis.ee.ic.ac.uk/%7Evbalnt/shallow_descr/TFeat_paper.pdf """ def __init__(self, p=2, eps=1e-06, swap=False): super(TripletLogExpLossNew, self).__init__() self.p = p self.eps = eps self.swap = swap def eval_func(self, dp, dn): return np.log(1 + np.exp(dp - dn)) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
dongan-beta/deep-image-retrieval
TripletLogExpLoss
false
15201
[ "BSD-3-Clause" ]
253
3e0885f88da328aefb7abb2fa350f8860a4bd52d
https://github.com/dongan-beta/deep-image-retrieval/tree/3e0885f88da328aefb7abb2fa350f8860a4bd52d
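For reference, the loss this record fuses into one persistent reduction is the soft-margin triplet objective L(a, p, n) = log(1 + exp(d(a, p) - d(a, n))) (the docstring's math directive is missing a closing parenthesis), where each distance is ||x1 - x2 + eps||_2, which is why the kernel adds 1e-06 to every difference before squaring. A standalone restatement (tensors are illustrative; torch.log1p is used only as the numerically safer spelling of the log(1 + exp(.)) that the kernel writes out directly):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
anchor = torch.rand(4, 4)
positive = torch.rand(4, 4)
negative = torch.rand(4, 4)

# pairwise_distance computes ||x1 - x2 + eps||_p, matching the kernel's "+ 1e-06"
d_p = F.pairwise_distance(anchor, positive, p=2, eps=1e-06)
d_n = F.pairwise_distance(anchor, negative, p=2, eps=1e-06)

loss = torch.log1p(torch.exp(d_p - d_n)).mean()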
APLoss_dist
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/k2/ck266gaihvgoxly4bowauykafsvsnnafl6krevx6lg3fdryk46d2.py # Topologically Sorted Source Nodes: [mul, sub, sqrt, d], Original ATen: [aten.mul, aten.rsub, aten.sqrt] # Source node to ATen node mapping: # d => sub_1 # mul => mul # sqrt => sqrt # sub => sub # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.001, %mul), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sub,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sqrt), kwargs = {}) triton_poi_fused_mul_rsub_sqrt_0 = async_compile.triton('triton_poi_fused_mul_rsub_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_mul_rsub_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 2.001 tmp4 = tmp3 - tmp2 tmp5 = libdevice.sqrt(tmp4) tmp6 = 1.0 tmp7 = tmp6 - tmp5 tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nd/cndxcqxuvs23ysbpwbbandbdmscpjru5cc77ujvj233azqylzu24.py # Topologically Sorted Source Nodes: [min_1, q_1, mul_1, rec, cumsum, nbs, cumsum_1, add, prec, sum_3, rec_1, mul_2, ap], Original ATen: [aten.minimum, aten.clamp, aten.mul, aten.sum, aten.cumsum, aten.add, aten.div] # Source node to ATen node mapping: # add => add # ap => sum_4 # cumsum => cumsum # cumsum_1 => cumsum_1 # min_1 => minimum # mul_1 => mul_1 # mul_2 => mul_2 # nbs => sum_1 # prec => div # q_1 => clamp_min # rec => sum_2 # rec_1 => div_1 # sum_3 => sum_3 # Graph fragment: # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%slice_2, %slice_4), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%minimum, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min, %view), kwargs = {}) # %sum_2 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {}) # %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sum_2, -1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {}) # %cumsum_1 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sum_1, -1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cumsum_1, 1e-16), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%cumsum, %add), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_2, [-1]), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %unsqueeze_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {}) triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1 = async_compile.triton('triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton_heuristics.persistent_reduction( size_hints=[4, 32], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 14, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 25 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + ((4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (r1), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr0 + (100 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr1 + (25 + r1), rmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr0 + (101 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (2 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr0 + (102 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp26 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (3 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.load(in_ptr0 + (103 + (4*r1) + (200*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp35 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.minimum(tmp2, tmp5) tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp10 = tmp8 * tmp9 tmp12 = tmp11 + tmp1 tmp14 = tmp13 + tmp4 tmp15 = triton_helpers.minimum(tmp12, tmp14) tmp16 = triton_helpers.maximum(tmp15, tmp7) tmp18 = tmp16 * tmp17 tmp19 = tmp10 + tmp18 tmp21 = tmp20 + tmp1 tmp23 = tmp22 + tmp4 tmp24 = triton_helpers.minimum(tmp21, tmp23) tmp25 = triton_helpers.maximum(tmp24, tmp7) tmp27 = tmp25 * tmp26 tmp28 = tmp19 + tmp27 tmp30 = tmp29 + tmp1 tmp32 = tmp31 + tmp4 tmp33 = triton_helpers.minimum(tmp30, tmp32) tmp34 = triton_helpers.maximum(tmp33, tmp7) tmp36 = tmp34 * tmp35 tmp37 = tmp28 + tmp36 tmp38 = tmp8 + tmp16 tmp39 = tmp38 + tmp25 tmp40 = tmp39 + tmp34 tmp41 = tmp40.to(tl.float32) tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp43, = tl.associative_scan((tmp42,), 1, _triton_helper_fn_add0) tmp44 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK]) tmp46 = tl.where(rmask & xmask, tmp44, 0) tmp47 = tl.sum(tmp46, 1)[:, None] tmp48 = tmp37.to(tl.float32) tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK]) tmp50, = 
tl.associative_scan((tmp49,), 1, _triton_helper_fn_add0) tmp51 = 1e-16 tmp52 = tmp43 + tmp51 tmp53 = tmp50 / tmp52 tmp54 = tmp37 / tmp47 tmp55 = tmp53 * tmp54 tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.where(rmask & xmask, tmp56, 0) tmp59 = tl.sum(tmp58, 1)[:, None] tl.store(in_out_ptr1 + (x0), tmp59, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/u2/cu2t6bkeledk75diwog42b5g2ekxve4rpnk5loywlit7ulhimxzl.py # Topologically Sorted Source Nodes: [mean, sub_2], Original ATen: [aten.mean, aten.rsub] # Source node to ATen node mapping: # mean => mean # sub_2 => sub_2 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mean), kwargs = {}) triton_per_fused_mean_rsub_2 = async_compile.triton('triton_per_fused_mean_rsub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_rsub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_rsub_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 4.0 tmp5 = tmp3 / tmp4 tmp6 = 1.0 tmp7 = tmp6 - tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (50, 1, 1), (1, 1, 1)) assert_size_stride(arg3_1, (50, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sub, sqrt, d], Original ATen: [aten.mul, 
aten.rsub, aten.sqrt] stream0 = get_raw_stream(0) triton_poi_fused_mul_rsub_sqrt_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 # Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 1, 4), (4, 0, 1), 0), arg2_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 50, 4), (200, 4, 1)) del arg2_1 del buf0 buf6 = empty_strided_cuda((4, ), (1, ), torch.float32) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [min_1, q_1, mul_1, rec, cumsum, nbs, cumsum_1, add, prec, sum_3, rec_1, mul_2, ap], Original ATen: [aten.minimum, aten.clamp, aten.mul, aten.sum, aten.cumsum, aten.add, aten.div] triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1.run(buf7, buf1, arg3_1, arg1_1, 4, 25, grid=grid(4), stream=stream0) del arg1_1 del arg3_1 del buf1 buf8 = empty_strided_cuda((), (), torch.float32) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [mean, sub_2], Original ATen: [aten.mean, aten.rsub] triton_per_fused_mean_rsub_2.run(buf9, buf7, 1, 4, grid=grid(1), stream=stream0) del buf7 return (buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((50, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn def sim_to_dist(scores): return 1 - torch.sqrt(2.001 - 2 * scores) class APLoss(nn.Module): """ Differentiable AP loss, through quantization. From the paper: Learning with Average Precision: Training Image Retrieval with a Listwise Loss Jerome Revaud, Jon Almazan, Rafael Sampaio de Rezende, Cesar de Souza https://arxiv.org/abs/1906.07589 Input: (N, M) values in [min, max] label: (N, M) values in {0, 1} Returns: 1 - mAP (mean AP for each n in {1..N}) Note: typically, this is what you wanna minimize """ def __init__(self, nq=25, min=0, max=1): nn.Module.__init__(self) assert isinstance(nq, int) and 2 <= nq <= 100 self.nq = nq self.min = min self.max = max gap = max - min assert gap > 0 self.quantizer = q = nn.Conv1d(1, 2 * nq, kernel_size=1, bias=True) q.weight = nn.Parameter(q.weight.detach(), requires_grad=False) q.bias = nn.Parameter(q.bias.detach(), requires_grad=False) a = (nq - 1) / gap q.weight[:nq] = -a q.bias[:nq] = torch.from_numpy(a * min + np.arange(nq, 0, -1)) q.weight[nq:] = a q.bias[nq:] = torch.from_numpy(np.arange(2 - nq, 2, 1) - a * min) q.weight[0] = q.weight[-1] = 0 q.bias[0] = q.bias[-1] = 1 def forward(self, x, label, qw=None, ret='1-mAP'): assert x.shape == label.shape N, M = x.shape q = self.quantizer(x.unsqueeze(1)) q = torch.min(q[:, :self.nq], q[:, self.nq:]).clamp(min=0) nbs = q.sum(dim=-1) rec = (q * label.view(N, 1, M).float()).sum(dim=-1) prec = rec.cumsum(dim=-1) / (1e-16 + nbs.cumsum(dim=-1)) rec /= rec.sum(dim=-1).unsqueeze(1) ap = (prec * rec).sum(dim=-1) if ret == '1-mAP': if qw is not None: ap *= qw return 1 - ap.mean() elif ret == 'AP': assert qw is None return ap else: raise ValueError('Bad return type for APLoss(): %s' % str(ret)) def measures(self, x, gt, loss=None): if loss is None: loss = self.forward(x, gt) return {'loss_ap': float(loss)} class APLoss_dist(APLoss): def forward(self, x, label, **kw): d = sim_to_dist(x) return APLoss.forward(self, d, label, **kw) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_rsub_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 2.001 tmp4 = tmp3 - tmp2 tmp5 = libdevice.sqrt(tmp4) tmp6 = 1.0 tmp7 = tmp6 - tmp5 tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 25 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0 ) tmp3 = tl.load(in_ptr0 + (100 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr1 + (25 + r1), rmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr0 + (101 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (2 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr0 + (102 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp26 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr0 + (3 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.load(in_ptr0 + (103 + 4 * r1 + 200 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp35 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.minimum(tmp2, tmp5) tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp10 = tmp8 * tmp9 tmp12 = tmp11 + tmp1 tmp14 = tmp13 + tmp4 tmp15 = triton_helpers.minimum(tmp12, tmp14) tmp16 = triton_helpers.maximum(tmp15, tmp7) tmp18 = tmp16 * tmp17 tmp19 = tmp10 + tmp18 tmp21 = tmp20 + tmp1 tmp23 = tmp22 + tmp4 tmp24 = triton_helpers.minimum(tmp21, tmp23) tmp25 = triton_helpers.maximum(tmp24, tmp7) tmp27 = tmp25 * tmp26 tmp28 = tmp19 + tmp27 tmp30 = tmp29 + tmp1 tmp32 = tmp31 + tmp4 tmp33 = triton_helpers.minimum(tmp30, tmp32) tmp34 = triton_helpers.maximum(tmp33, tmp7) tmp36 = tmp34 * tmp35 tmp37 = tmp28 + tmp36 tmp38 = tmp8 + tmp16 tmp39 = tmp38 + tmp25 tmp40 = tmp39 + 
tmp34 tmp41 = tmp40.to(tl.float32) tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp43, = tl.associative_scan((tmp42,), 1, _triton_helper_fn_add0) tmp44 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK]) tmp46 = tl.where(rmask & xmask, tmp44, 0) tmp47 = tl.sum(tmp46, 1)[:, None] tmp48 = tmp37.to(tl.float32) tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK]) tmp50, = tl.associative_scan((tmp49,), 1, _triton_helper_fn_add0) tmp51 = 1e-16 tmp52 = tmp43 + tmp51 tmp53 = tmp50 / tmp52 tmp54 = tmp37 / tmp47 tmp55 = tmp53 * tmp54 tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.where(rmask & xmask, tmp56, 0) tmp59 = tl.sum(tmp58, 1)[:, None] tl.store(in_out_ptr1 + x0, tmp59, xmask) @triton.jit def triton_per_fused_mean_rsub_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 4.0 tmp5 = tmp3 / tmp4 tmp6 = 1.0 tmp7 = tmp6 - tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (50, 1, 1), (1, 1, 1)) assert_size_stride(arg3_1, (50,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_rsub_sqrt_0[grid(16)](arg0_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) del arg0_1 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 1, 4 ), (4, 0, 1), 0), arg2_1, stride=(1,), padding=(0,), dilation=( 1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 50, 4), (200, 4, 1)) del arg2_1 del buf0 buf6 = empty_strided_cuda((4,), (1,), torch.float32) buf7 = buf6 del buf6 triton_per_fused_add_clamp_cumsum_div_minimum_mul_sum_1[grid(4)](buf7, buf1, arg3_1, arg1_1, 4, 25, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del arg3_1 del buf1 buf8 = empty_strided_cuda((), (), torch.float32) buf9 = buf8 del buf8 triton_per_fused_mean_rsub_2[grid(1)](buf9, buf7, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf7 return buf9, def sim_to_dist(scores): return 1 - torch.sqrt(2.001 - 2 * scores) class APLoss(nn.Module): """ Differentiable AP loss, through quantization. 
From the paper: Learning with Average Precision: Training Image Retrieval with a Listwise Loss Jerome Revaud, Jon Almazan, Rafael Sampaio de Rezende, Cesar de Souza https://arxiv.org/abs/1906.07589 Input: (N, M) values in [min, max] label: (N, M) values in {0, 1} Returns: 1 - mAP (mean AP for each n in {1..N}) Note: typically, this is what you wanna minimize """ def __init__(self, nq=25, min=0, max=1): nn.Module.__init__(self) assert isinstance(nq, int) and 2 <= nq <= 100 self.nq = nq self.min = min self.max = max gap = max - min assert gap > 0 self.quantizer = q = nn.Conv1d(1, 2 * nq, kernel_size=1, bias=True) q.weight = nn.Parameter(q.weight.detach(), requires_grad=False) q.bias = nn.Parameter(q.bias.detach(), requires_grad=False) a = (nq - 1) / gap q.weight[:nq] = -a q.bias[:nq] = torch.from_numpy(a * min + np.arange(nq, 0, -1)) q.weight[nq:] = a q.bias[nq:] = torch.from_numpy(np.arange(2 - nq, 2, 1) - a * min) q.weight[0] = q.weight[-1] = 0 q.bias[0] = q.bias[-1] = 1 def forward(self, x, label, qw=None, ret='1-mAP'): assert x.shape == label.shape N, M = x.shape q = self.quantizer(x.unsqueeze(1)) q = torch.min(q[:, :self.nq], q[:, self.nq:]).clamp(min=0) nbs = q.sum(dim=-1) rec = (q * label.view(N, 1, M).float()).sum(dim=-1) prec = rec.cumsum(dim=-1) / (1e-16 + nbs.cumsum(dim=-1)) rec /= rec.sum(dim=-1).unsqueeze(1) ap = (prec * rec).sum(dim=-1) if ret == '1-mAP': if qw is not None: ap *= qw return 1 - ap.mean() elif ret == 'AP': assert qw is None return ap else: raise ValueError('Bad return type for APLoss(): %s' % str(ret)) def measures(self, x, gt, loss=None): if loss is None: loss = self.forward(x, gt) return {'loss_ap': float(loss)} class APLoss_distNew(APLoss): def forward(self, input_0, input_1): arg2_1 = self.quantizer.weight arg3_1 = self.quantizer.bias arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
dongan-beta/deep-image-retrieval
APLoss_dist
false
15202
[ "BSD-3-Clause" ]
253
3e0885f88da328aefb7abb2fa350f8860a4bd52d
https://github.com/dongan-beta/deep-image-retrieval/tree/3e0885f88da328aefb7abb2fa350f8860a4bd52d
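A minimal eager-mode usage sketch for the APLoss_dist record above, assuming the sim_to_dist/APLoss/APLoss_dist definitions from its python_code field are in scope. Shapes follow get_inputs() ((N, M) = (4, 4)); the labels are constructed so every query has at least one relevant item, since an all-zero label row would make the rec normalization divide by ~0 and produce NaN. The printed loss value is illustrative only.

import torch

loss_fn = APLoss_dist(nq=25, min=0, max=1)  # quantizer weights are frozen in __init__
scores = torch.rand(4, 4)                   # (N, M) similarity scores in [0, 1]
labels = torch.zeros(4, 4)
labels[:, :2] = 1.0                         # two relevant items per query (avoids NaN)
loss = loss_fn(scores, labels)              # scalar tensor: 1 - mAP
ap = loss_fn(scores, labels, ret='AP')      # per-query AP, shape (N,)
print(float(loss), ap.shape)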
Conv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wl/cwldpc2k6v7rbizd6tlddleva3alwxblabsherkqjtef5e45djwk.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # pad => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) % 8 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-2) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-2) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4n/c4nv2qrk3qesfzhmmlvg6nswa4xauhqhvv4tkc4vcrmocpm2vugt.py # Topologically Sorted Source Nodes: [output, output_1, output_2], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # output => convolution # output_1 => add, repeat, rsqrt, var_mean # output_2 => relu # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 32], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*i1', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 25 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + (25*x0)), rmask & xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.full([XBLOCK, 1], 25, tl.int32) tmp12 = tmp11.to(tl.float32) tmp13 = tmp10 / tmp12 tmp14 = tmp4 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(rmask & xmask, tmp16, 0) tmp19 = tl.sum(tmp18, 1)[:, None] tmp20 = tmp3 - tmp13 tmp21 = 25.0 tmp22 = tmp19 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) tmp26 = tmp20 * tmp25 tmp27 = tmp26 * tmp0 tmp29 = tmp27 + tmp28 tmp30 = tl.full([1, 1], 0, tl.int32) tmp31 = triton_helpers.maximum(tmp30, tmp29) tmp32 = 0.0 tmp33 = tmp31 <= tmp32 tl.store(out_ptr0 + (x0), tmp0, xmask) tl.store(in_out_ptr0 + (r3 + (25*x0)), tmp3, rmask & xmask) tl.store(out_ptr3 + (r3 + (25*x0)), tmp31, rmask & xmask) tl.store(out_ptr4 + (r3 + (25*x0)), tmp33, rmask & xmask) tl.store(out_ptr5 + (x0), tmp25, xmask) tl.store(out_ptr1 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1)) buf3 = empty_strided_cuda((16, ), (1, ), torch.float32) buf2 = buf1; del buf1 # reuse buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf8 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [output, output_1, output_2], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] 
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1.run(buf2, primals_4, primals_3, primals_5, buf3, buf4, buf8, buf9, buf7, 16, 25, grid=grid(16), stream=stream0) del primals_3 del primals_4 del primals_5 return (buf8, primals_2, buf0, buf2, buf3, reinterpret_tensor(buf7, (16, ), (1, ), 0), buf9, reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch.nn.functional import interpolate from typing import cast class Interpolate(nn.Module): def __init__(self, scale_factor: 'float'=1.0, mode: 'str'='nearest' ) ->None: super().__init__() self.scale_factor = scale_factor self.mode = mode def forward(self, input: 'torch.Tensor') ->torch.Tensor: return cast(torch.Tensor, interpolate(input, scale_factor=self. scale_factor, mode=self.mode)) def extra_repr(self) ->str: extras = [f'scale_factor={self.scale_factor}'] if self.mode != 'nearest': extras.append(f'mode={self.mode}') return ', '.join(extras) class Conv(nn.Module): def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size: 'int', *, stride: int=1, upsample: bool=False, norm: bool=True, activation: bool=True): super().__init__() self.upsample = Interpolate(scale_factor=stride) if upsample else None self.pad = nn.ReflectionPad2d(kernel_size // 2) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1 if upsample else stride) self.norm = nn.InstanceNorm2d(out_channels, affine=True ) if norm else None self.activation = nn.ReLU() if activation else None def forward(self, input: 'torch.Tensor') ->torch.Tensor: if self.upsample: input = self.upsample(input) output = self.conv(self.pad(input)) if self.norm: output = self.norm(output) if self.activation: output = self.activation(output) return cast(torch.Tensor, output) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn from torch.nn.functional import interpolate from typing import cast assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 8 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 25 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 25 * x0), rmask & xmask, other=0.0) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.full([XBLOCK, 1], 25, tl.int32) tmp12 = tmp11.to(tl.float32) tmp13 = tmp10 / tmp12 tmp14 = tmp4 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(rmask & xmask, tmp16, 0) tmp19 = tl.sum(tmp18, 1)[:, None] tmp20 = tmp3 - tmp13 tmp21 = 25.0 tmp22 = tmp19 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) tmp26 = tmp20 * tmp25 tmp27 = tmp26 * tmp0 tmp29 = tmp27 + tmp28 tmp30 = tl.full([1, 1], 0, tl.int32) tmp31 = triton_helpers.maximum(tmp30, tmp29) tmp32 = 0.0 tmp33 = tmp31 <= tmp32 tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(in_out_ptr0 + (r3 + 25 * x0), tmp3, rmask & xmask) tl.store(out_ptr3 + (r3 + 25 * x0), tmp31, rmask & xmask) tl.store(out_ptr4 + (r3 + 25 * x0), tmp33, rmask & xmask) tl.store(out_ptr5 + x0, tmp25, xmask) tl.store(out_ptr1 + x0, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0, 1024, 
XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1)) buf3 = empty_strided_cuda((16,), (1,), torch.float32) buf2 = buf1 del buf1 buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf8 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1[ grid(16)](buf2, primals_4, primals_3, primals_5, buf3, buf4, buf8, buf9, buf7, 16, 25, XBLOCK=1, num_warps=2, num_stages=1) del primals_3 del primals_4 del primals_5 return buf8, primals_2, buf0, buf2, buf3, reinterpret_tensor(buf7, (16, ), (1,), 0), buf9, reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0) class Interpolate(nn.Module): def __init__(self, scale_factor: 'float'=1.0, mode: 'str'='nearest' ) ->None: super().__init__() self.scale_factor = scale_factor self.mode = mode def forward(self, input: 'torch.Tensor') ->torch.Tensor: return cast(torch.Tensor, interpolate(input, scale_factor=self. scale_factor, mode=self.mode)) def extra_repr(self) ->str: extras = [f'scale_factor={self.scale_factor}'] if self.mode != 'nearest': extras.append(f'mode={self.mode}') return ', '.join(extras) class ConvNew(nn.Module): def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size: 'int', *, stride: int=1, upsample: bool=False, norm: bool=True, activation: bool=True): super().__init__() self.upsample = Interpolate(scale_factor=stride) if upsample else None self.pad = nn.ReflectionPad2d(kernel_size // 2) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=1 if upsample else stride) self.norm = nn.InstanceNorm2d(out_channels, affine=True ) if norm else None self.activation = nn.ReLU() if activation else None def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_4 = self.norm.weight primals_5 = self.norm.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
dooglewoogle/pystiche
Conv
false
15203
[ "BSD-3-Clause" ]
129
14b61123ede2abdb00daaa5b4981de6d7edaf034
https://github.com/dooglewoogle/pystiche/tree/14b61123ede2abdb00daaa5b4981de6d7edaf034
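A quick shape-check sketch for the eager Conv module recorded above, assuming the Interpolate/Conv definitions from its python_code field are in scope. The (4, 4, 5, 5) output matches the buf8 allocation in the compiled call(): reflection padding of kernel_size // 2 = 2 grows each 4x4 map to 8x8, and the 4x4 convolution at stride 1 then yields 8 - 4 + 1 = 5.

import torch

conv = Conv(in_channels=4, out_channels=4, kernel_size=4)
x = torch.rand(4, 4, 4, 4)
y = conv(x)        # reflection-pad by 2 -> 4x4 conv -> instance norm -> ReLU
print(y.shape)     # torch.Size([4, 4, 5, 5])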
_nms
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bx/cbxtynq44n4tp4cxbujfty37ujqf74bvycxzmcnqv3qcmqmrt2yf.py # Topologically Sorted Source Nodes: [hmax, eq, keep, mul], Original ATen: [aten.max_pool2d_with_indices, aten.eq, aten._to_copy, aten.mul] # Source node to ATen node mapping: # eq => eq # hmax => _low_memory_max_pool2d_with_offsets # keep => convert_element_type # mul => mul # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [3, 3], [1, 1], [1, 1], [1, 1], False), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%getitem, %arg0_1), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %convert_element_type), kwargs = {}) triton_poi_fused__to_copy_eq_max_pool2d_with_indices_mul_0 = async_compile.triton('triton_poi_fused__to_copy_eq_max_pool2d_with_indices_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_eq_max_pool2d_with_indices_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_eq_max_pool2d_with_indices_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x3 = xindex tmp52 = tl.load(in_ptr0 + (x3), xmask) tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-5) + x3), tmp10 & xmask, other=float("-inf")) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-4) + x3), tmp16 & xmask, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + ((-3) + x3), tmp23 & xmask, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + ((-1) + x3), tmp30 & xmask, other=float("-inf")) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (x3), tmp33 & xmask, other=float("-inf")) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x3), tmp36 & xmask, other=float("-inf")) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + x3), tmp43 & xmask, other=float("-inf")) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + x3), tmp46 & xmask, other=float("-inf")) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + x3), tmp49 & xmask, other=float("-inf")) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp53 = tmp51 == tmp52 tmp54 = tmp53.to(tl.float32) tmp55 = tmp52 * tmp54 tl.store(in_out_ptr0 + (x3), tmp55, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [hmax, eq, keep, mul], Original ATen: [aten.max_pool2d_with_indices, aten.eq, aten._to_copy, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_eq_max_pool2d_with_indices_mul_0.run(buf1, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch import torch.nn as nn class _nms(nn.Module): def __init__(self): super(_nms, self).__init__() kernel = 3 pad = (kernel - 1) // 2 self.maxpool = nn.MaxPool2d(kernel_size=kernel, stride=1, padding=pad) def forward(self, heat): hmax = self.maxpool(heat) keep = (hmax == heat).float() return heat * keep def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_eq_max_pool2d_with_indices_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x3 = xindex tmp52 = tl.load(in_ptr0 + x3, xmask) tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + x3), tmp10 & xmask, other=float('-inf')) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + x3), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + x3), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + x3), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + x3, tmp33 & xmask, other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x3), tmp36 & xmask, other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + x3), tmp43 & xmask, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + x3), tmp46 & xmask, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + x3), tmp49 & xmask, other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp53 = tmp51 == tmp52 tmp54 = tmp53.to(tl.float32) tmp55 = tmp52 * tmp54 tl.store(in_out_ptr0 + x3, tmp55, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused__to_copy_eq_max_pool2d_with_indices_mul_0[grid(256)]( buf1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf1, class _nmsNew(nn.Module): def __init__(self): super(_nmsNew, self).__init__() kernel = 3 pad = (kernel - 1) // 2 self.maxpool = nn.MaxPool2d(kernel_size=kernel, stride=1, padding=pad) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
donnyyou/centerX
_nms
false
15204
[ "Apache-2.0" ]
350
6e381cb669a6014d02e31a43915271237690531c
https://github.com/donnyyou/centerX/tree/6e381cb669a6014d02e31a43915271237690531c
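A small sketch of the recorded _nms module's behaviour, assuming the _nms class above is in scope: the 3x3 max-pool keeps a heatmap value only where it equals the maximum of its neighbourhood, the usual keypoint non-maximum-suppression trick, so every surviving score is either zero or an untouched local maximum of the input.

import torch

nms = _nms()
heat = torch.rand(4, 4, 4, 4)  # per-class keypoint heatmaps
peaks = nms(heat)              # same shape; non-peak scores are zeroed out
assert torch.all((peaks == 0) | (peaks == heat))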
UpConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/tc/ctc7x3jpax35yx2pkxxfdyyfficonvfhhsbh4f7urq6xwaxnxl5l.py # Topologically Sorted Source Nodes: [conv_transpose2d, elu], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv_transpose2d => convolution # elu => expm1, gt, mul, mul_2, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_convolution_elu_0 = async_compile.triton('triton_poi_fused_convolution_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [conv_transpose2d, elu], Original ATen: [aten.convolution, aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_elu_0.run(buf1, primals_2, buf2, 1024, grid=grid(1024), stream=stream0) del primals_2 return (buf2, primals_1, primals_3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class UpConv(nn.Module): def __init__(self, input_nc, output_nc, kernel_size): super(UpConv, self).__init__() self.deconv = nn.ConvTranspose2d(in_channels=input_nc, out_channels =output_nc, kernel_size=2, bias=True, stride=2, padding=0) self.activation_fn = nn.ELU() def forward(self, input): return self.activation_fn(self.deconv(input)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_nc': 4, 'output_nc': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(1024)](buf1, primals_2, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, primals_3, buf1 class UpConvNew(nn.Module): def __init__(self, input_nc, output_nc, kernel_size): super(UpConvNew, self).__init__() self.deconv = nn.ConvTranspose2d(in_channels=input_nc, out_channels =output_nc, kernel_size=2, bias=True, stride=2, padding=0) self.activation_fn = nn.ELU() def forward(self, input_0): primals_1 = self.deconv.weight primals_2 = self.deconv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dong1015323606/LKVOLearner
UpConv
false
15205
[ "BSD-3-Clause" ]
237
6ac9fb5d3c22d6a81529063f8c52d6aa34166b2a
https://github.com/dong1015323606/LKVOLearner/tree/6ac9fb5d3c22d6a81529063f8c52d6aa34166b2a
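A minimal sketch for the recorded UpConv module, assuming the UpConv class above is in scope. Note that the kernel_size constructor argument is accepted but unused: the transposed convolution is hard-coded to kernel 2, stride 2, padding 0, so the spatial resolution simply doubles ((4 - 1) * 2 + 2 = 8 per dimension) before the ELU.

import torch

up = UpConv(input_nc=4, output_nc=4, kernel_size=4)  # kernel_size is ignored internally
x = torch.rand(4, 4, 4, 4)
y = up(x)          # ConvTranspose2d(k=2, s=2) followed by ELU
print(y.shape)     # torch.Size([4, 4, 8, 8])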
DetLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/iz/ciznu5jo2naglznr53wlmdnmy42srwbnjdzba4oimbqvfgw5l3i5.py # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul, sub, mul_1, p_det, mul_2, mean, mean_1, det_loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.rsub, aten.sigmoid, aten.mean, aten.div] # Source node to ATen node mapping: # binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, minimum, mul_2, neg, sub_1, sub_2, sub_3 # det_loss => div # mean => mean # mean_1 => mean_1 # mul => mul # mul_1 => mul_1 # mul_2 => mul_3 # p_det => sigmoid # sub => sub # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %sub_2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %sub), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul_1,), kwargs = {}) # 
%mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %sigmoid), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_3,), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sigmoid,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mean, %mean_1), kwargs = {}) triton_per_fused_binary_cross_entropy_with_logits_div_mean_mul_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_div_mean_mul_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_div_mean_mul_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_div_mean_mul_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = 2.0 tmp14 = tmp0 * tmp13 tmp15 = tmp1 - tmp14 tmp16 = tmp3 * tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp12 * tmp17 tmp19 = tl.broadcast_to(tmp18, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = tl.broadcast_to(tmp17, [RBLOCK]) tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0)) tmp25 = 256.0 tmp26 = tmp21 / tmp25 tmp27 = tmp24 / tmp25 tmp28 = tmp26 / tmp27 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp28, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/b3/cb37x6vshwi3n2trcskoatxxt4dihceogwvqdwcm7ezzsbbhptqm.py # Topologically Sorted Source Nodes: [max_1, smooth_l1_loss, 
mul_3, mean_2, smooth_l1_loss_1, mul_4, mean_4], Original ATen: [aten.max, aten.smooth_l1_loss, aten.mul, aten.mean] # Source node to ATen node mapping: # max_1 => max_1 # mean_2 => mean_2 # mean_4 => mean_4 # mul_3 => mul_5 # mul_4 => mul_7 # smooth_l1_loss => abs_2, div_1, lt, mul_4, pow_1, sub_4, sub_5, where # smooth_l1_loss_1 => abs_3, div_3, lt_1, mul_6, pow_2, sub_6, sub_7, where_1 # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, 1, True), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg2_1), kwargs = {}) # %abs_2 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub_4,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_2, 1.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_2, 2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_4, 1.0), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_2, 0.5), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div_1, %sub_5), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %where), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_5,), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg5_1, %arg4_1), kwargs = {}) # %abs_3 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub_6,), kwargs = {}) # %lt_1 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_3, 1.0), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_3, 2), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, 0.5), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_6, 1.0), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_3, 0.5), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt_1, %div_3, %sub_7), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %where_1), kwargs = {}) # %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_7,), kwargs = {}) triton_per_fused_max_mean_mul_smooth_l1_loss_1 = async_compile.triton('triton_per_fused_max_mean_mul_smooth_l1_loss_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, 
major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=(7,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_mean_mul_smooth_l1_loss_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 8, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_mean_mul_smooth_l1_loss_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r2 = (rindex // 64) r3 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (r3), None) tmp8 = tl.load(in_ptr2 + (r3), None) tmp23 = tl.load(in_ptr3 + (r3), None) tmp24 = tl.load(in_ptr4 + (r3), None) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp9 = tmp7 - tmp8 tmp10 = tl_math.abs(tmp9) tmp11 = 1.0 tmp12 = tmp10 < tmp11 tmp13 = tmp10 * tmp10 tmp14 = 0.5 tmp15 = tmp13 * tmp14 tmp16 = tmp15 * tmp11 tmp17 = tmp10 - tmp14 tmp18 = tl.where(tmp12, tmp16, tmp17) tmp19 = tmp6 * tmp18 tmp20 = tl.broadcast_to(tmp19, [RBLOCK]) tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0)) tmp25 = tmp23 - tmp24 tmp26 = tl_math.abs(tmp25) tmp27 = tmp26 < tmp11 tmp28 = tmp26 * tmp26 tmp29 = tmp28 * tmp14 tmp30 = tmp29 * tmp11 tmp31 = tmp26 - tmp14 tmp32 = tl.where(tmp27, tmp30, tmp31) tmp33 = tmp6 * tmp32 tmp34 = tl.broadcast_to(tmp33, [RBLOCK]) tmp36 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp22, None) tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp36, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/o4/co4s23zu5u7g36huc5igwk5dddyg4nxktadctutqptqbdbgjomam.py # Topologically Sorted Source Nodes: [max_1, smooth_l1_loss, mul_3, mean_2, mean_3, box_loss, smooth_l1_loss_1, mul_4, mean_4, mean_5, ori_loss], Original ATen: [aten.max, aten.smooth_l1_loss, aten.mul, aten.mean, aten.div] # Source node to ATen node mapping: # box_loss => div_2 # max_1 => max_1 # mean_2 => mean_2 # mean_3 => mean_3 # mean_4 => mean_4 # mean_5 => mean_5 # mul_3 => mul_5 # mul_4 => mul_7 # ori_loss => div_4 # smooth_l1_loss => abs_2, div_1, lt, mul_4, pow_1, sub_4, sub_5, where # smooth_l1_loss_1 => abs_3, div_3, lt_1, mul_6, pow_2, sub_6, sub_7, where_1 # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, 1, True), kwargs = {}) # %sub_4 : 
[num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg2_1), kwargs = {}) # %abs_2 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub_4,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_2, 1.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_2, 2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_4, 1.0), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_2, 0.5), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div_1, %sub_5), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %where), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_5,), kwargs = {}) # %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%getitem,), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mean_2, %mean_3), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg5_1, %arg4_1), kwargs = {}) # %abs_3 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub_6,), kwargs = {}) # %lt_1 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_3, 1.0), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_3, 2), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, 0.5), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_6, 1.0), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_3, 0.5), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt_1, %div_3, %sub_7), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %where_1), kwargs = {}) # %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_7,), kwargs = {}) # %mean_5 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%getitem,), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mean_4, %mean_5), kwargs = {}) triton_per_fused_div_max_mean_mul_smooth_l1_loss_2 = async_compile.triton('triton_per_fused_div_max_mean_mul_smooth_l1_loss_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 
1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_max_mean_mul_smooth_l1_loss_2', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_max_mean_mul_smooth_l1_loss_2(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp3 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp10 = tl.load(in_out_ptr0 + (0)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, 1]) tmp17 = tl.load(in_out_ptr1 + (0)) tmp18 = tl.broadcast_to(tmp17, [XBLOCK, 1]) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp12 = 256.0 tmp13 = tmp11 / tmp12 tmp14 = 64.0 tmp15 = tmp9 / tmp14 tmp16 = tmp13 / tmp15 tmp19 = tmp18 / tmp12 tmp20 = tmp19 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp20, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf6 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul, sub, mul_1, p_det, mul_2, mean, mean_1, det_loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.rsub, aten.sigmoid, aten.mean, aten.div] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_div_mean_mul_rsub_sigmoid_0.run(buf6, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf4 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [max_1, smooth_l1_loss, mul_3, mean_2, smooth_l1_loss_1, mul_4, mean_4], Original ATen: [aten.max, aten.smooth_l1_loss, aten.mul, aten.mean] triton_per_fused_max_mean_mul_smooth_l1_loss_1.run(arg0_1, arg3_1, arg2_1, arg5_1, arg4_1, buf2, buf4, 1, 256, grid=grid(1), 
stream=stream0) del arg2_1 del arg3_1 del arg4_1 del arg5_1 buf8 = buf4; del buf4 # reuse buf7 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [max_1, smooth_l1_loss, mul_3, mean_2, mean_3, box_loss, smooth_l1_loss_1, mul_4, mean_4, mean_5, ori_loss], Original ATen: [aten.max, aten.smooth_l1_loss, aten.mul, aten.mean, aten.div] triton_per_fused_div_max_mean_mul_smooth_l1_loss_2.run(buf8, buf7, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf6, buf7, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg5_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class DetLoss(nn.Module): def __init__(self): super().__init__() self.hm_criterion = nn.BCEWithLogitsLoss(reduction='none') self.ori_criterion = nn.SmoothL1Loss(reduction='none') self.box_criterion = nn.SmoothL1Loss(reduction='none') def forward(self, pred_heatmaps, heatmaps, pred_sizemaps, sizemaps, pred_orimaps, orimaps): size_w, _ = heatmaps.max(dim=1, keepdim=True) p_det = torch.sigmoid(pred_heatmaps * (1 - 2 * heatmaps)) det_loss = (self.hm_criterion(pred_heatmaps, heatmaps) * p_det).mean( ) / p_det.mean() box_loss = (size_w * self.box_criterion(pred_sizemaps, sizemaps)).mean( ) / size_w.mean() ori_loss = (size_w * self.ori_criterion(pred_orimaps, orimaps)).mean( ) / size_w.mean() return det_loss, box_loss, ori_loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
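# Illustrative smoke test (not part of the original record): exercises the
# eager DetLoss above on CPU via the record's own get_inputs() helper; the
# variable names below are assumptions for illustration only.
if __name__ == '__main__':
    criterion = DetLoss()
    det_loss, box_loss, ori_loss = criterion(*get_inputs())
    print(det_loss.item(), box_loss.item(), ori_loss.item())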
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_div_mean_mul_rsub_sigmoid_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = 2.0 tmp14 = tmp0 * tmp13 tmp15 = tmp1 - tmp14 tmp16 = tmp3 * tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp12 * tmp17 tmp19 = tl.broadcast_to(tmp18, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = tl.broadcast_to(tmp17, [RBLOCK]) tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0)) tmp25 = 256.0 tmp26 = tmp21 / tmp25 tmp27 = tmp24 / tmp25 tmp28 = tmp26 / tmp27 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None) @triton.jit def triton_per_fused_max_mean_mul_smooth_l1_loss_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r2 = rindex // 64 r3 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + r3, None) tmp8 = tl.load(in_ptr2 + r3, None) tmp23 = tl.load(in_ptr3 + r3, None) tmp24 = tl.load(in_ptr4 + r3, None) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp9 = tmp7 - tmp8 tmp10 = tl_math.abs(tmp9) tmp11 = 1.0 tmp12 = tmp10 < tmp11 tmp13 = tmp10 * tmp10 tmp14 = 0.5 tmp15 = tmp13 * tmp14 tmp16 = tmp15 * tmp11 tmp17 = tmp10 - tmp14 tmp18 = tl.where(tmp12, tmp16, tmp17) tmp19 = tmp6 * tmp18 tmp20 = tl.broadcast_to(tmp19, [RBLOCK]) tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0)) tmp25 = tmp23 - tmp24 tmp26 = tl_math.abs(tmp25) tmp27 = tmp26 < tmp11 tmp28 = tmp26 * tmp26 tmp29 = tmp28 * tmp14 tmp30 = tmp29 * tmp11 tmp31 = tmp26 - tmp14 tmp32 = tl.where(tmp27, tmp30, tmp31) tmp33 = tmp6 * tmp32 tmp34 = tl.broadcast_to(tmp33, [RBLOCK]) tmp36 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None) tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp36, None) @triton.jit def triton_per_fused_div_max_mean_mul_smooth_l1_loss_2(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): 
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp10 = tl.load(in_out_ptr0 + 0)
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, 1])
    tmp17 = tl.load(in_out_ptr1 + 0)
    tmp18 = tl.broadcast_to(tmp17, [XBLOCK, 1])
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp12 = 256.0
    tmp13 = tmp11 / tmp12
    tmp14 = 64.0
    tmp15 = tmp9 / tmp14
    tmp16 = tmp13 / tmp15
    tmp19 = tmp18 / tmp12
    tmp20 = tmp19 / tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)


def call(args):
    arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_with_logits_div_mean_mul_rsub_sigmoid_0[
            grid(1)](buf6, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf4 = empty_strided_cuda((), (), torch.float32)
        triton_per_fused_max_mean_mul_smooth_l1_loss_1[grid(1)](arg0_1,
            arg3_1, arg2_1, arg5_1, arg4_1, buf2, buf4, 1, 256, num_warps=2,
            num_stages=1)
        del arg2_1
        del arg3_1
        del arg4_1
        del arg5_1
        buf8 = buf4
        del buf4
        buf7 = buf2
        del buf2
        triton_per_fused_div_max_mean_mul_smooth_l1_loss_2[grid(1)](buf8,
            buf7, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf6, buf7, buf8


class DetLossNew(nn.Module):

    def __init__(self):
        super().__init__()
        self.hm_criterion = nn.BCEWithLogitsLoss(reduction='none')
        self.ori_criterion = nn.SmoothL1Loss(reduction='none')
        self.box_criterion = nn.SmoothL1Loss(reduction='none')

    def forward(self, input_0, input_1, input_2, input_3, input_4, input_5):
        # In the traced graph above, arg0_1 is the target heatmap (it is
        # reduced by max over dim 1 and used as the BCE target), arg1_1 the
        # predicted logits, and the size/orientation pairs enter as
        # (pred, target) = (arg3_1, arg2_1) and (arg5_1, arg4_1); the forward
        # arguments are therefore mapped by role, not positionally.
        arg0_1 = input_1
        arg1_1 = input_0
        arg2_1 = input_3
        arg3_1 = input_2
        arg4_1 = input_5
        arg5_1 = input_4
        output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
        return output[0], output[1], output[2]
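# Illustrative parity sketch (not part of the generated code): compares the
# fused DetLossNew above against an inline eager re-statement of the record's
# reference forward. It assumes a CUDA device, since call() launches the
# Triton kernels on 'cuda'; the helper name _eager_det_loss is hypothetical.
def _eager_det_loss(pred_hm, hm, pred_size, size, pred_ori, ori):
    import torch.nn.functional as F
    size_w, _ = hm.max(dim=1, keepdim=True)
    p_det = torch.sigmoid(pred_hm * (1 - 2 * hm))
    det = (F.binary_cross_entropy_with_logits(pred_hm, hm, reduction='none')
        * p_det).mean() / p_det.mean()
    box = (size_w * F.smooth_l1_loss(pred_size, size, reduction='none')
        ).mean() / size_w.mean()
    ori_l = (size_w * F.smooth_l1_loss(pred_ori, ori, reduction='none')
        ).mean() / size_w.mean()
    return det, box, ori_l


if __name__ == '__main__':
    inputs = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(6)]
    fused = DetLossNew()(*inputs)
    eager = _eager_det_loss(*inputs)
    assert all(torch.allclose(f, e, atol=1e-5) for f, e in zip(fused, eager))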
dotchen/LAV
DetLoss
false
15206
[ "Apache-2.0" ]
122
dc9b4cfca39abd50c7438e8749d49f6ac0fe5e4e
https://github.com/dotchen/LAV/tree/dc9b4cfca39abd50c7438e8749d49f6ac0fe5e4e
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x1 => relu # Graph fragment: # %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1, ), (1, )) assert_size_stride(primals_9, (4, 8), (8, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, 
), (1, )) assert_size_stride(primals_13, (1, 4), (4, 1)) assert_size_stride(primals_14, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf4, primals_6, 16, grid=grid(16), stream=stream0) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [q_value1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 4), (1, 8), 0), out=buf7) del primals_9 buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf8, primals_10, 16, grid=grid(16), stream=stream0) del primals_10 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf9) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [x2_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf10, primals_12, 16, grid=grid(16), stream=stream0) del primals_12 buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [q_value2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(primals_13, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_14 return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13, primals_11, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_10 = 
rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class Critic(nn.Module): def __init__(self, state_size, action_size, args): super(Critic, self).__init__() self.fc1 = nn.Linear(state_size + action_size, args.hidden_size) self.fc2 = nn.Linear(args.hidden_size, args.hidden_size) self.fc3 = nn.Linear(args.hidden_size, 1) self.fc4 = nn.Linear(state_size + action_size, args.hidden_size) self.fc5 = nn.Linear(args.hidden_size, args.hidden_size) self.fc6 = nn.Linear(args.hidden_size, 1) def forward(self, states, actions): x = torch.cat([states, actions], dim=1) x1 = torch.relu(self.fc1(x)) x1 = torch.relu(self.fc2(x1)) q_value1 = self.fc3(x1) x2 = torch.relu(self.fc4(x)) x2 = torch.relu(self.fc5(x2)) q_value2 = self.fc6(x2) return q_value1, q_value2 def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'args': _mock_config( hidden_size=4)}]
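# Illustrative smoke test (not part of the original record): runs the eager
# Critic above on CPU. _mock_config comes from _paritybench_helpers; a
# types.SimpleNamespace with the same attribute is used here as a stand-in
# assumption.
if __name__ == '__main__':
    from types import SimpleNamespace
    critic = Critic(state_size=4, action_size=4, args=SimpleNamespace(
        hidden_size=4))
    q1, q2 = critic(*get_inputs())
    print(q1.shape, q2.shape)  # both torch.Size([4, 1])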
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1,), (1,)) assert_size_stride(primals_9, (4, 8), (8, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (1, 4), (4, 1)) assert_size_stride(primals_14, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4 ), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, 
reinterpret_tensor(primals_9, (8, 4), (1, 8), 0), out=buf7)
        del primals_9
        buf8 = buf7
        del buf7
        triton_poi_fused_relu_1[grid(16)](buf8, primals_10, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_10
        buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (4, 4), (1,
            4), 0), out=buf9)
        buf10 = buf9
        del buf9
        triton_poi_fused_relu_1[grid(16)](buf10, primals_12, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_12
        buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(
            primals_13, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf12)
        del primals_14
    return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13,
        primals_11, primals_7, primals_5)


class CriticNew(nn.Module):

    def __init__(self, state_size, action_size, args):
        super(CriticNew, self).__init__()
        self.fc1 = nn.Linear(state_size + action_size, args.hidden_size)
        self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
        self.fc3 = nn.Linear(args.hidden_size, 1)
        self.fc4 = nn.Linear(state_size + action_size, args.hidden_size)
        self.fc5 = nn.Linear(args.hidden_size, args.hidden_size)
        self.fc6 = nn.Linear(args.hidden_size, 1)

    def forward(self, input_0, input_1):
        # Map inputs and parameters to the placeholder order of the traced
        # graph: primals_1/primals_2 feed the cat kernel (states, actions),
        # while primals_5 and primals_11 are the fc2/fc5 weights consumed by
        # the mm calls in call() above.
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = self.fc1.weight
        primals_4 = self.fc1.bias
        primals_5 = self.fc2.weight
        primals_6 = self.fc2.bias
        primals_7 = self.fc3.weight
        primals_8 = self.fc3.bias
        primals_9 = self.fc4.weight
        primals_10 = self.fc4.bias
        primals_11 = self.fc5.weight
        primals_12 = self.fc5.bias
        primals_13 = self.fc6.weight
        primals_14 = self.fc6.bias
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14])
        return output[0], output[1]
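# Illustrative parity sketch (not part of the generated code): checks
# CriticNew against the eager Critic from this record's reference field.
# Assumes a CUDA device and that the eager Critic class is importable in
# scope; the parameter names match, so a plain state_dict transfer lines the
# two modules up.
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=4)
    eager = Critic(4, 4, cfg).cuda()
    fused = CriticNew(4, 4, cfg).cuda()
    fused.load_state_dict(eager.state_dict())
    states = torch.rand(4, 4, device='cuda')
    actions = torch.rand(4, 4, device='cuda')
    q1_ref, q2_ref = eager(states, actions)
    q1, q2 = fused(states, actions)
    assert torch.allclose(q1, q1_ref, atol=1e-5)
    assert torch.allclose(q2, q2_ref, atol=1e-5)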
dongminlee94/Samsung-DRL-Code
Critic
false
15207
[ "MIT" ]
116
c96f8739a09cfd708c265954ee8ecf0ea3b67395
https://github.com/dongminlee94/Samsung-DRL-Code/tree/c96f8739a09cfd708c265954ee8ecf0ea3b67395
AngleSimpleLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), 
xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xe/cxewggzrfqe57dzglxrzfhfgpsywlh36utvtdulp5oi75wfs7ml3.py # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize_1 => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/km/ckmig5ptfj2o4ehbs23kkteksy6opupdrfewu7uhpa5zkvhn3l6r.py # Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] # Source node to ATen node mapping: # clamp => clamp_max, clamp_min_2 # Graph fragment: # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mm, -0.9999999), kwargs = {}) # 
%clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 0.9999999), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%mm, -0.9999999), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%mm, 0.9999999), kwargs = {}) # %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {}) triton_poi_fused_clamp_ge_le_logical_and_2 = async_compile.triton('triton_poi_fused_clamp_ge_le_logical_and_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_le_logical_and_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = -0.9999999 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 0.9999999 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = tmp0 >= tmp1 tmp6 = tmp0 <= tmp3 tmp7 = tmp5 & tmp6 tl.store(out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr1 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] triton_poi_fused_div_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize_1, cos_theta], Original ATen: [aten.div, aten.mm] extern_kernels.mm(buf0, buf1, out=buf2) buf3 = buf1; del buf1 # reuse buf4 = empty_strided_cuda((4, 4), (4, 1), 
torch.bool) # Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] triton_poi_fused_clamp_ge_le_logical_and_2.run(buf2, buf3, buf4, 16, grid=grid(16), stream=stream0) del buf2 return (buf3, primals_2, buf4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch.nn import functional as F from torch import nn from torchvision import models as models from torch.nn import Parameter from torch.nn.parameter import Parameter import torch.onnx import torch.nn class AngleSimpleLinear(nn.Module): """Computes cos of angles between input vectors and weights vectors""" def __init__(self, in_features, out_features): super().__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(in_features, out_features)) self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0) def forward(self, x): cos_theta = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0)) return cos_theta.clamp(-1.0 + 1e-07, 1.0 - 1e-07), def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
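# Illustrative smoke test (not part of the original record): the eager module
# above returns a 1-tuple whose single element holds the clamped cosines, so
# its values stay strictly inside (-1, 1).
if __name__ == '__main__':
    layer = AngleSimpleLinear(4, 4)
    cos_theta, = layer(*get_inputs())
    assert cos_theta.shape == (4, 4) and cos_theta.abs().max() < 1.0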
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torchvision import models as models from torch.nn import Parameter from torch.nn.parameter import Parameter import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -0.9999999 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 0.9999999 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = tmp0 >= tmp1 tmp6 = tmp0 <= tmp3 tmp7 = tmp5 & tmp6 tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp7, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, 
buf1, out=buf2) buf3 = buf1 del buf1 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_clamp_ge_le_logical_and_2[grid(16)](buf2, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 return buf3, primals_2, buf4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0) class AngleSimpleLinearNew(nn.Module): """Computes cos of angles between input vectors and weights vectors""" def __init__(self, in_features, out_features): super().__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(in_features, out_features)) self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0) def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
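For entries like this one, a natural sanity check is to run the eager module and the Inductor-generated wrapper side by side. The sketch below is a hypothetical harness, not part of the original repo; it assumes a CUDA device, since call() allocates CUDA buffers and launches Triton kernels, and it simply prints whether the two paths agree.

import torch

eager = AngleSimpleLinear(4, 4).cuda()
compiled = AngleSimpleLinearNew(4, 4).cuda()
compiled.weight.data.copy_(eager.weight.data)  # align the two random inits

x = torch.rand(4, 4, device='cuda')
ref, = eager(x)    # eager forward returns a one-element tuple
out = compiled(x)  # the wrapper returns the clamped tensor directly
print(torch.allclose(ref, out, atol=1e-6))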
dqawami/openvino_training_extensions
AngleSimpleLinear
false
15,208
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
LogitKLDivLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/l3/cl3mqwaki56dc4zcxfjjgkbopnejxzhksqm6egdinynmjrsrw2qw.py # Topologically Sorted Source Nodes: [q], Original ATen: [aten._softmax] # Source node to ATen node mapping: # q => exp_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pf/cpfkvifhrhobwuxls65xhwdpkryeblqmmtghouii4lp3rhe3crx4.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {}) # %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {}) # %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 1), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2q/c2qrlfsyqs2p3f3bpqyxkmtudhr7ggfpwzeibezoa7vdh4hgyyfy.py # Topologically Sorted Source Nodes: [q, kl_div, log_p, mul], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] # Source node to ATen node mapping: # kl_div => div_3, eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, sum_3, where, where_1 # log_p => exp, log, sub_1, sum_1 # mul => mul_2 # q => div_2, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %div_2 : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_2,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div_2, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_2,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %sub_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, 1), kwargs = {}) triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2 = 
async_compile.triton('triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (r3), None) tmp18 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float("nan") tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 0.25 tmp37 = tmp35 * tmp36 tmp38 = 1.0 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, 
tl.int32)), tmp39, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [q], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [q, kl_div, log_p, mul], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2.run(buf4, buf0, buf2, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn


class LogitKLDivLoss(nn.Module):
    """Kullback–Leibler divergence loss. Takes predicted and ground-truth logits as inputs.

    Args:
        T (float): Softmax temperature.
    """

    def __init__(self, T=1):
        super().__init__()
        self.T = T

    def forward(self, p_logits, q_logits, **kwargs):
        log_p = F.log_softmax(p_logits / self.T, dim=1)
        q = F.softmax(q_logits / self.T, dim=1)
        return F.kl_div(log_p, q, reduction='batchmean') * self.T ** 2


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
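A minimal usage sketch for the eager loss above; the temperature T=2 and the variable names are illustrative, and the input shapes follow get_inputs.

import torch

criterion = LogitKLDivLoss(T=2)
p_logits = torch.rand(4, 4, 4, 4)  # e.g. student logits
q_logits = torch.rand(4, 4, 4, 4)  # e.g. teacher logits
loss = criterion(p_logits, q_logits)
print(loss)  # scalar; 'batchmean' KL scaled by T ** 2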
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + r3, None) tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 
'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 0.25 tmp37 = tmp35 * tmp36 tmp38 = 1.0 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp39, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1) ](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1) del buf0 del buf2 return buf4, class LogitKLDivLossNew(nn.Module): """Kullback–Leibler divergence loss. Inputs predicted and ground truth logits. Args: T (float): Softmax temperature. """ def __init__(self, T=1): super().__init__() self.T = T def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
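A hypothetical smoke test for the compiled wrapper above; the assert_size_stride guards in call() pin both inputs to contiguous (4, 4, 4, 4) CUDA tensors, so only that shape is valid here, and the temperature is baked in as T=1 inside the fused kernels.

import torch

loss_fn = LogitKLDivLossNew()
p = torch.rand(4, 4, 4, 4, device='cuda')
q = torch.rand(4, 4, 4, 4, device='cuda')
print(loss_fn(p, q))  # 0-dim tensor on cuda:0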
dqawami/openvino_training_extensions
LogitKLDivLoss
false
15,209
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
LengthPredictor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/2p/c2pz7hk7ojfdgnk4ip5f32yhhrwsy5z4dgkcle3scbax3xbada7z.py # Topologically Sorted Source Nodes: [mul, sum_1, mean_emb], Original ATen: [aten.mul, aten.sum, aten.div] # Source node to ATen node mapping: # mean_emb => div # mul => mul # sum_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %unsqueeze), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %unsqueeze_1), kwargs = {}) triton_poi_fused_div_mul_sum_0 = async_compile.triton('triton_poi_fused_div_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x0 = xindex % 16 x2 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = tmp1 + tmp4 tmp16 = tmp15 + tmp8 tmp17 = tmp16 + tmp12 tmp18 = tmp14 / tmp17 tl.store(out_ptr0 + (x4), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tf/ctfw3infhg572qltjdhl4t446ht5hhkkxggakqt7t537x5viyosq.py # Topologically Sorted Source Nodes: [argmax, delta], Original ATen: [aten.argmax, aten.sub] # Source node to ATen node mapping: # argmax => argmax # delta => sub # Graph fragment: # %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%view_1, -1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%argmax, 50.0), kwargs = {}) triton_per_fused_argmax_sub_1 = async_compile.triton('triton_per_fused_argmax_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_argmax_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_argmax_sub_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 100 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (100*x0)), rmask & xmask, other=0.0) tmp1 = 
tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = tl.broadcast_to(rindex, tmp3.shape) _, tmp2_tmp = triton_helpers.max_with_index(tmp3, tmp4, 1) tmp2 = tmp2_tmp[:, None] tmp5 = tmp2.to(tl.float32) tmp6 = 50.0 tmp7 = tmp5 - tmp6 tl.store(out_ptr1 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (100, 4), (4, 1)) assert_size_stride(primals_4, (100, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sum_1, mean_emb], Original ATen: [aten.mul, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_mul_sum_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 100), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [argmax, delta], Original ATen: [aten.argmax, aten.sub] triton_per_fused_argmax_sub_1.run(buf1, buf3, 64, 100, grid=grid(64), stream=stream0) return (reinterpret_tensor(buf1, (4, 4, 4, 100), (1600, 400, 100, 1), 0), buf3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch.nn import functional as F from torch import nn from torchvision import models as models import torch.onnx import torch.nn class LengthPredictionLoss(nn.Module): def __init__(self, max_delta=50): super().__init__() self.max_delta = max_delta def forward(self, logits, src_mask, tgt_mask): src_lens, tgt_lens = src_mask.sum(1), tgt_mask.sum(1) delta = (tgt_lens - src_lens + self.max_delta).clamp(0, self. max_delta * 2 - 1).long() loss = F.cross_entropy(logits, delta, reduction='mean') return {'length_prediction_loss': loss} class LengthPredictor(nn.Module): def __init__(self, hidden_size, max_delta=50): super().__init__() self.hidden_size = hidden_size self.max_delta = max_delta self._init_modules() self._init_loss() def forward(self, src, src_mask, tgt_len=None): src_mean = self._compute_mean_emb(src, src_mask) logits, delta = self._predict_delta(src_mean) return logits, delta def _predict_delta(self, src): logits = self.length_predictor(src) delta = logits.argmax(-1) - float(self.max_delta) return logits, delta def _compute_mean_emb(self, src, src_mask): mean_emb = (src * src_mask[:, :, None]).sum(1) / src_mask.sum(1)[:, None] return mean_emb def _init_modules(self): self.length_predictor = nn.Linear(self.hidden_size, self.max_delta * 2) def _init_loss(self): self.loss = LengthPredictionLoss(self.max_delta) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
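A minimal usage sketch for the eager module above; hidden_size and the input shapes follow get_init_inputs and get_inputs, and max_delta keeps its default of 50, which yields 100 logit classes.

import torch

predictor = LengthPredictor(hidden_size=4)
src = torch.rand(4, 4, 4, 4)
src_mask = torch.rand(4, 4, 4, 4)
logits, delta = predictor(src, src_mask)
print(logits.shape)  # torch.Size([4, 4, 4, 100]) -- 2 * max_delta classes
print(delta.shape)   # torch.Size([4, 4, 4]) -- argmax offset by -max_delta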
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import functional as F from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x0 = xindex % 16 x2 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = tmp1 + tmp4 tmp16 = tmp15 + tmp8 tmp17 = tmp16 + tmp12 tmp18 = tmp14 / tmp17 tl.store(out_ptr0 + x4, tmp18, xmask) @triton.jit def triton_per_fused_argmax_sub_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 rnumel = 100 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 100 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = tl.broadcast_to(rindex, tmp3.shape) _, tmp2_tmp = triton_helpers.max_with_index(tmp3, tmp4, 1) tmp2 = tmp2_tmp[:, None] tmp5 = tmp2.to(tl.float32) tmp6 = 50.0 tmp7 = tmp5 - tmp6 tl.store(out_ptr1 + x0, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (100, 4), (4, 1)) assert_size_stride(primals_4, (100,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_sum_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_3, (4, 100), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_per_fused_argmax_sub_1[grid(64)](buf1, buf3, 64, 100, XBLOCK =8, num_warps=8, num_stages=1) return reinterpret_tensor(buf1, (4, 
4, 4, 100), (1600, 400, 100, 1), 0 ), buf3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class LengthPredictionLoss(nn.Module): def __init__(self, max_delta=50): super().__init__() self.max_delta = max_delta def forward(self, logits, src_mask, tgt_mask): src_lens, tgt_lens = src_mask.sum(1), tgt_mask.sum(1) delta = (tgt_lens - src_lens + self.max_delta).clamp(0, self. max_delta * 2 - 1).long() loss = F.cross_entropy(logits, delta, reduction='mean') return {'length_prediction_loss': loss} class LengthPredictorNew(nn.Module): def __init__(self, hidden_size, max_delta=50): super().__init__() self.hidden_size = hidden_size self.max_delta = max_delta self._init_modules() self._init_loss() def _predict_delta(self, src): logits = self.length_predictor(src) delta = logits.argmax(-1) - float(self.max_delta) return logits, delta def _compute_mean_emb(self, src, src_mask): mean_emb = (src * src_mask[:, :, None]).sum(1) / src_mask.sum(1)[:, None] return mean_emb def _init_modules(self): self.length_predictor = nn.Linear(self.hidden_size, self.max_delta * 2) def _init_loss(self): self.loss = LengthPredictionLoss(self.max_delta) def forward(self, input_0, input_1): primals_3 = self.length_predictor.weight primals_4 = self.length_predictor.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
dqawami/openvino_training_extensions
LengthPredictor
false
15,210
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
ResNet_conv1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qu/cquqzv3ivftfhjiw5pegt6wcvhg47xarvd7qjcaxl2xtmqgdwshd.py # Topologically Sorted Source Nodes: [sub, truediv, setitem, sub_1, truediv_1, setitem_1, sub_2, truediv_2, setitem_2], Original ATen: [aten.sub, aten.div, aten.copy] # Source node to ATen node mapping: # setitem => copy # setitem_1 => copy_1 # setitem_2 => copy_2 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # truediv => div # truediv_1 => div_1 # truediv_2 => div_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select, 0.485), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 0.229), kwargs = {}) # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select, %div), kwargs = {}) # %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%primals_1, %copy, 1, 0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_5, 0.485), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, 0.224), kwargs = {}) # %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_5, %div_1), kwargs = {}) # %select_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %copy_1, 1, 1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_11, 0.485), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, 0.224), kwargs = {}) # %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_11, %div_2), kwargs = {}) # %select_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %copy_2, 1, 2), kwargs = {}) triton_poi_fused_copy_div_sub_0 = async_compile.triton('triton_poi_fused_copy_div_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_copy_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 3 x0 = xindex % 4096 x2 = (xindex // 12288) x3 = xindex tmp7 = tl.load(in_ptr0 + (x0 + (12288*x2)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (4096 + x0 + (12288*x2)), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (8192 + x0 + (12288*x2)), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (x3), None) tmp0 = x1 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tl.full([1], 1, tl.int32) tmp4 = tmp1 == tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = tmp3 == tmp5 tmp8 = 0.485 tmp9 = tmp7 - tmp8 tmp10 = 4.366812227074235 tmp11 = tmp9 * tmp10 tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tmp13 - tmp8 tmp15 = 4.464285714285714 tmp16 = tmp14 * tmp15 tmp17 = tmp1 == tmp5 tmp19 = tl.where(tmp17, tmp11, tmp18) tmp20 = tl.where(tmp4, tmp16, tmp19) tmp21 = tmp20 - tmp8 tmp22 = tmp21 * tmp15 tmp23 = tmp0 == tmp3 tmp24 = tmp0 == tmp5 tmp26 = tl.where(tmp24, tmp11, tmp25) tmp27 = tl.where(tmp23, tmp16, tmp26) tmp28 = tl.where(tmp2, tmp22, tmp27) tl.store(out_ptr0 + (x3), tmp28, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (64, 3, 7, 7), (147, 49, 7, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, truediv, setitem, sub_1, truediv_1, setitem_1, sub_2, truediv_2, setitem_2], Original ATen: [aten.sub, aten.div, aten.copy] stream0 = get_raw_stream(0) triton_poi_fused_copy_div_sub_0.run(primals_1, buf0, 49152, grid=grid(49152), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 64, 64), (262144, 4096, 64, 1)) return (buf1, primals_2, buf0, ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.utils.data import torch.nn as nn class ResNet_conv1(nn.Module): def __init__(self, block, layers, num_classes=1000): self.inplanes = 64 super(ResNet_conv1, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3, bias=False) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def forward(self, x): y = x.clone() y[:, 0, :, :] = (y[:, 0, :, :] - 0.485) / 0.229 y[:, 1, :, :] = (y[:, 1, :, :] - 0.485) / 0.224 y[:, 2, :, :] = (y[:, 2, :, :] - 0.485) / 0.224 x = self.conv1(y) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {'block': 4, 'layers': 1}]
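A minimal usage sketch; block and layers are unused by this conv1-only stem, so the placeholder values from get_init_inputs are assumed, and the variable names are illustrative.

import torch

stem = ResNet_conv1(block=4, layers=1)
img = torch.rand(4, 3, 64, 64)
feat = stem(img)   # clones the input, normalizes each channel, then applies conv1
print(feat.shape)  # torch.Size([4, 64, 64, 64])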
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_copy_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 3 x0 = xindex % 4096 x2 = xindex // 12288 x3 = xindex tmp7 = tl.load(in_ptr0 + (x0 + 12288 * x2), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (4096 + x0 + 12288 * x2), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (8192 + x0 + 12288 * x2), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + x3, None) tmp0 = x1 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tl.full([1], 1, tl.int32) tmp4 = tmp1 == tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = tmp3 == tmp5 tmp8 = 0.485 tmp9 = tmp7 - tmp8 tmp10 = 4.366812227074235 tmp11 = tmp9 * tmp10 tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tmp13 - tmp8 tmp15 = 4.464285714285714 tmp16 = tmp14 * tmp15 tmp17 = tmp1 == tmp5 tmp19 = tl.where(tmp17, tmp11, tmp18) tmp20 = tl.where(tmp4, tmp16, tmp19) tmp21 = tmp20 - tmp8 tmp22 = tmp21 * tmp15 tmp23 = tmp0 == tmp3 tmp24 = tmp0 == tmp5 tmp26 = tl.where(tmp24, tmp11, tmp25) tmp27 = tl.where(tmp23, tmp16, tmp26) tmp28 = tl.where(tmp2, tmp22, tmp27) tl.store(out_ptr0 + x3, tmp28, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (64, 3, 7, 7), (147, 49, 7, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_div_sub_0[grid(49152)](primals_1, buf0, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 64, 64), (262144, 4096, 64, 1)) return buf1, primals_2, buf0 class ResNet_conv1New(nn.Module): def __init__(self, block, layers, num_classes=1000): self.inplanes = 64 super(ResNet_conv1New, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3, bias=False) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def forward(self, input_0): primals_2 = self.conv1.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
donegaci/memc-net
ResNet_conv1
false
15,211
[ "MIT" ]
145
9bdb0ab6ce99af22a165db2cedacd148dd6083c0
https://github.com/donegaci/memc-net/tree/9bdb0ab6ce99af22a165db2cedacd148dd6083c0
Norm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/hl/chltyhkzxhjzwmo5rp357yg3rwp5ia6qja7pmyko6cvpxcmgnl6l.py # Topologically Sorted Source Nodes: [z2, out, out_1], Original ATen: [aten.linalg_vector_norm, aten.sub, aten.mul] # Source node to ATen node mapping: # out => sub # out_1 => mul # z2 => pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, 4), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {}) triton_per_fused_linalg_vector_norm_mul_sub_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_linalg_vector_norm_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0)) tmp5 = libdevice.sqrt(tmp4) tmp6 = 4.0 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [z2, out, out_1], Original ATen: [aten.linalg_vector_norm, aten.sub, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_linalg_vector_norm_mul_sub_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data class Norm(nn.Module): def __init__(self, dims): super(Norm, self).__init__() self.dims = dims def forward(self, x): z2 = torch.norm(x, p=2) out = z2 - self.dims out = out * out return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dims': 4}]
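For orientation, the reference module above collapses the whole input to a single scalar: the squared gap between the tensor's global L2 norm and dims. A minimal sketch of the same computation in plain eager ops (shapes and the value 4 are taken from get_inputs/get_init_inputs above):

import torch

# What the fused kernel computes, spelled out in eager ops:
# out = (||x||_2 - dims)^2, one 0-dim tensor per call.
x = torch.rand([4, 4, 4, 4])
dims = 4.0
z2 = torch.linalg.vector_norm(x)  # global L2 norm over all 256 elements
out = (z2 - dims) ** 2
print(out.shape)  # torch.Size([]) -- matching the scalar buf1 in the wrapper above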
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_linalg_vector_norm_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0)) tmp5 = libdevice.sqrt(tmp4) tmp6 = 4.0 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_linalg_vector_norm_mul_sub_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class NormNew(nn.Module): def __init__(self, dims): super(NormNew, self).__init__() self.dims = dims def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
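A hedged smoke test for the compiled wrapper, assuming a CUDA device and the Norm/NormNew classes defined above; the eager and Triton-backed paths should agree to floating-point tolerance:

import torch

# Hypothetical parity check; requires CUDA and the classes defined above.
if torch.cuda.is_available():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    ref = Norm(4)(x)      # eager: (||x||_2 - 4)^2
    got = NormNew(4)(x)   # compiled: single persistent-reduction kernel
    assert torch.allclose(ref, got, atol=1e-6)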
doudoulaile/RL-GAN-Net
Norm
false
15,212
[ "MIT" ]
112
9c221223d1878bc24f0f39ad34928c1bb2974ae3
https://github.com/doudoulaile/RL-GAN-Net/tree/9c221223d1878bc24f0f39ad34928c1bb2974ae3
StateInitZero
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7e/c7edgnsiuilw7uzwau7radvkvvtmowm7d7uh56mczbhieiykfrnx.py # Topologically Sorted Source Nodes: [h0], Original ATen: [aten.new_zeros] # Source node to ATen node mapping: # h0 => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_zeros_0 = async_compile.triton('triton_poi_fused_new_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args 
args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h0], Original ATen: [aten.new_zeros] stream0 = get_raw_stream(0) triton_poi_fused_new_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [c0], Original ATen: [aten.new_zeros] triton_poi_fused_new_zeros_0.run(buf1, 16, grid=grid(16), stream=stream0) return (buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn


class StateInitZero(nn.Module):

    def __init__(self, hidden_size, num_layers=1, batch_first=False):
        super(StateInitZero, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.batch_first = batch_first

    def forward(self, input: 'torch.Tensor'):
        h0 = input.new_zeros((self.num_layers,
            input.size(0 if self.batch_first else 1), self.hidden_size))
        c0 = input.new_zeros((self.num_layers,
            input.size(0 if self.batch_first else 1), self.hidden_size))
        return h0, c0


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4}]
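StateInitZero simply manufactures the (h0, c0) pair an nn.LSTM expects, sized from the incoming batch; a short usage sketch (the LSTM wiring is illustrative, not part of the record):

import torch
from torch import nn

# Hypothetical usage: seed an LSTM with zero states sized from its input.
# With batch_first=False the input is (seq_len, batch, feature), so the
# batch dimension is input.size(1) -- exactly what forward() reads above.
lstm = nn.LSTM(input_size=4, hidden_size=4, num_layers=1)
init = StateInitZero(hidden_size=4, num_layers=1, batch_first=False)
x = torch.rand(5, 3, 4)             # (seq_len=5, batch=3, feature=4)
h0, c0 = init(x)                    # both (num_layers=1, batch=3, hidden=4)
out, (hn, cn) = lstm(x, (h0, c0))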
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_new_zeros_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf0, buf1 class StateInitZeroNew(nn.Module): def __init__(self, hidden_size, num_layers=1, batch_first=False): super(StateInitZeroNew, self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers self.batch_first = batch_first def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1]
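Note that tracing baked the example shapes into the compiled wrapper, so StateInitZeroNew is only valid for the (4, 4, 4, 4) input used at capture time; a hypothetical shape check (requires CUDA and the class above):

import torch

# The traced graph fixed num_layers=1, batch=4, hidden=4 at capture time.
if torch.cuda.is_available():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    h0, c0 = StateInitZeroNew(hidden_size=4)(x)
    assert h0.shape == c0.shape == (1, 4, 4)
    assert h0.abs().sum().item() == 0.0  # both buffers are zero-filled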
dqawami/openvino_training_extensions
StateInitZero
false
15,213
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
ScaledDotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/4q/c4qoh645afcunrhaa5xye6sbkw2mzzlvmntdpffld4732bbjzx7o.py # Topologically Sorted Source Nodes: [dimention, attn_2], Original ATen: [aten.sqrt, aten._softmax] # Source node to ATen node mapping: # attn_2 => exp # dimention => full_default # Graph fragment: # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %ge_scalar : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%full_default, 0), kwargs = {}) # %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False}) # %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, %where_self), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_self, %full_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, %mul_tensor_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_sqrt_0 = async_compile.triton('triton_poi_fused__softmax_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, 
DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp8 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 2.0 tmp2 = 0.0 tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6 * tmp1 tmp21 = tmp19 / tmp20 tmp22 = tl_math.exp(tmp21) tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 
'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dimention, attn_2], Original ATen: [aten.sqrt, aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_sqrt_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torchvision import models as models import torch.onnx import torch.nn class ScaledDotProductAttention(nn.Module): def __init__(self, dropout=0, scale=True): super().__init__() self.dropout = nn.Dropout(p=dropout) self.softmax = nn.Softmax(dim=2) self.scale = scale def forward(self, q, k, v, mask=None): attn = torch.bmm(q, k.permute(0, 2, 1)) if self.scale: dimention = torch.sqrt(torch.tensor(k.shape[-1])) attn = attn / dimention if mask is not None: attn = attn.masked_fill(mask == 0, -1000000000.0) attn = self.softmax(attn) attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {}]
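The reference module is textbook scaled dot-product attention: scores = q @ k^T / sqrt(d), an optional mask fill with -1e9, softmax over the key axis, dropout, then a weighted sum of the values. A small usage sketch with a causal mask (the mask construction is illustrative):

import torch

# Hypothetical usage; shapes follow get_inputs() above: (batch, seq, dim).
layer = ScaledDotProductAttention(dropout=0, scale=True)
q = k = v = torch.rand(4, 4, 4)
mask = torch.tril(torch.ones(4, 4)).expand(4, 4, 4)  # zeros above the diagonal
output, attn = layer(q, k, v, mask=mask)
print(output.shape, attn.shape)  # both torch.Size([4, 4, 4])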
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 2.0 tmp2 = 0.0 tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6 * tmp1 tmp21 = tmp19 / tmp20 tmp22 = tl_math.exp(tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_sqrt_0[grid(64)](buf0, buf1, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return buf3, buf2 class ScaledDotProductAttentionNew(nn.Module): def __init__(self, dropout=0, scale=True): super().__init__() self.dropout = nn.Dropout(p=dropout) self.softmax = nn.Softmax(dim=2) self.scale = scale def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return 
output[0], output[1]
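A hedged parity check between the eager and compiled attention paths (assumes CUDA; no mask, since the graph above was captured without one):

import torch

# Hypothetical smoke test; requires CUDA and the two classes defined above.
if torch.cuda.is_available():
    q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
    eager_out, eager_attn = ScaledDotProductAttention()(q, k, v)
    fused_out, fused_attn = ScaledDotProductAttentionNew()(q, k, v)
    assert torch.allclose(eager_out, fused_out, atol=1e-5)
    assert torch.allclose(eager_attn, fused_attn, atol=1e-5)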
dqawami/openvino_training_extensions
ScaledDotProductAttention
false
15,214
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
GateAddNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/72/c723jvtrclg3poj5zaxaf2ealxzfqdh7cxi7fq6hzj2mdver7zut.py # Topologically Sorted Source Nodes: [sigmoid, x_1, add, layer_norm], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # layer_norm => var_mean # sigmoid => sigmoid # x_1 => mul # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_3), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_mul_native_layer_norm_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tmp7 = tl.sigmoid(tmp6) tmp9 = tmp7 * tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp14 = tl.sigmoid(tmp13) tmp16 = tmp14 * tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp21 = tl.sigmoid(tmp20) tmp23 = tmp21 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pz/cpzkfkivzmt666fzil6yog5ut3a5n4ly6eaewnkssr54nufw2fc5.py # Topologically Sorted Source Nodes: [sigmoid, x_1, add, layer_norm], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # layer_norm => add_1, add_2, mul_1, mul_2, rsqrt, sub # sigmoid => sigmoid # x_1 => mul # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_3), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_7), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_8), kwargs = {}) 
triton_poi_fused_add_mul_native_layer_norm_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 
del primals_3 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, x_1, add, layer_norm], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_native_layer_norm_sigmoid_0.run(buf0, buf1, primals_6, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, x_1, add, layer_norm], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_sigmoid_1.run(buf0, buf1, primals_6, buf2, buf3, primals_7, primals_8, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del primals_8 return (buf4, primals_6, primals_7, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torchvision import models as models import torch.onnx import torch.nn class GatedLinearUnit(nn.Module): def __init__(self, input_size, output_size, dropout=0): super().__init__() self.dropout = nn.Dropout(dropout) self.w4 = nn.Linear(input_size, output_size) self.w5 = nn.Linear(input_size, output_size) self.act = nn.Sigmoid() def forward(self, x): x = self.dropout(x) x = self.act(self.w4(x)) * self.w5(x) return x class GateAddNorm(nn.Module): def __init__(self, input_size, output_size, dropout): super().__init__() self.glu = GatedLinearUnit(input_size, output_size, dropout) self.norm = nn.LayerNorm(output_size) def forward(self, x, skip): return self.norm(self.glu(x) + skip) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4, 'dropout': 0.5}]
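GateAddNorm composes a gated linear unit (sigmoid(w4 x) * w5 x), a residual add with skip, and a LayerNorm, resembling the gate/add/norm block used in Temporal Fusion Transformer-style models. A brief deterministic sketch (dropout set to 0.0 for reproducibility):

import torch

# Hypothetical usage of the eager block defined above.
block = GateAddNorm(input_size=4, output_size=4, dropout=0.0)
x = torch.rand(4, 4, 4, 4)
skip = torch.rand(4, 4, 4, 4)
out = block(x, skip)   # LayerNorm(sigmoid(w4(x)) * w5(x) + skip)
print(out.shape)       # torch.Size([4, 4, 4, 4])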
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tmp7 = tl.sigmoid(tmp6) tmp9 = tmp7 * tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp14 = tl.sigmoid(tmp13) tmp16 = tmp14 * tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp21 = tl.sigmoid(tmp20) tmp23 = tmp21 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x2, xmask) tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args 
args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_native_layer_norm_sigmoid_0[grid(64)](buf0, buf1, primals_6, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(256)](buf0, buf1, primals_6, buf2, buf3, primals_7, primals_8, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_8 return buf4, primals_6, primals_7, reinterpret_tensor(primals_1, (64, 4 ), (4, 1), 0), buf0, buf1 class GatedLinearUnit(nn.Module): def __init__(self, input_size, output_size, dropout=0): super().__init__() self.dropout = nn.Dropout(dropout) self.w4 = nn.Linear(input_size, output_size) self.w5 = nn.Linear(input_size, output_size) self.act = nn.Sigmoid() def forward(self, x): x = self.dropout(x) x = self.act(self.w4(x)) * self.w5(x) return x class GateAddNormNew(nn.Module): def __init__(self, input_size, output_size, dropout): super().__init__() self.glu = GatedLinearUnit(input_size, output_size, dropout) self.norm = nn.LayerNorm(output_size) def forward(self, input_0, input_1): primals_2 = self.glu.w4.weight primals_3 = self.glu.w4.bias primals_4 = self.glu.w5.weight primals_5 = self.glu.w5.bias primals_7 = self.norm.weight primals_8 = self.norm.bias primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
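A hedged parity check between the eager and compiled variants (requires CUDA); parameters can be shared via load_state_dict because both classes expose identical submodule names:

import torch

# Hypothetical smoke test; assumes the classes defined above.
if torch.cuda.is_available():
    eager = GateAddNorm(4, 4, dropout=0.0).cuda().eval()
    fused = GateAddNormNew(4, 4, dropout=0.0).cuda().eval()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    skip = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x, skip), fused(x, skip), atol=1e-5)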
dqawami/openvino_training_extensions
GateAddNorm
false
15,215
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
_MCLSTMCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7x/c7xa2fzoeg7fkdhz3rijf3negddtnquocgqamwnzmzil4bl2mrsa.py # Topologically Sorted Source Nodes: [ct, norm, add, truediv], Original ATen: [aten.new_zeros, aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add => add # ct => full # norm => full_default, pow_2, sum_1 # truediv => div # Graph fragment: # %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%full_default, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 1.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-05), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%full, %add), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_new_zeros_0 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_new_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {1: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(2,), equal_to_1=(1,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_new_zeros_0', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_new_zeros_0(out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 4 r2 = (rindex // 4) tmp0 = 0.0 tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 1e-05 tmp5 = tmp3 + tmp4 tmp6 = tmp0 / tmp5 tl.store(out_ptr1 + (tl.broadcast_to(r1 + (12*r2), [XBLOCK, RBLOCK])), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ik/ciktnpigrrv57ihedpahnqmqkyqnrxb2ve44dt2xuj4zd3xovwjp.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_for_fused_1 = async_compile.triton('triton_for_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.foreach( num_warps=8, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'kernel_name': 'triton_for_fused_1', 'mutated_arg_names': [], 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, ) @triton.jit def triton_for_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1): pid = tl.program_id(0) XBLOCK: tl.constexpr = 1024 num_xblocks_0 = tl.cdiv(16, XBLOCK) num_xblocks_1 = num_xblocks_0 + tl.cdiv(16, XBLOCK) if pid < num_xblocks_0: pid_offset = pid xnumel = 16 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (12*x1)), tmp0, xmask) elif pid < num_xblocks_1: pid_offset = pid - num_xblocks_0 xnumel = 16 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x5 = xindex x3 = xindex % 4 x4 = (xindex // 4) tmp1 = tl.load(in_ptr1 + (x5), xmask) tl.store(out_ptr1 + (x3 + (12*x4)), tmp1, xmask) 
else: pass ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ro/croozag2ipv2izkskbnusmwfrgzvgxuxdtgvh6cmwihuv2ft2g6q.py # Topologically Sorted Source Nodes: [sigmoid, i], Original ATen: [aten.sigmoid, aten.div] # Source node to ATen node mapping: # i => div_1 # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sigmoid, %expand), kwargs = {}) triton_poi_fused_div_sigmoid_2 = async_compile.triton('triton_poi_fused_div_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tl_math.abs(tmp3) tmp6 = tl.sigmoid(tmp5) tmp7 = tl_math.abs(tmp6) tmp8 = tmp4 + tmp7 tmp10 = tl.sigmoid(tmp9) tmp11 = tl_math.abs(tmp10) tmp12 = tmp8 + tmp11 tmp14 = tl.sigmoid(tmp13) tmp15 = tl_math.abs(tmp14) tmp16 = tmp12 + tmp15 tmp17 = 1e-12 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tmp1 / tmp18 tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qo/cqohmoojuze4ghdeumxkas5argfq7scxxrrrrmi4ja5aujxqylbm.py # Topologically Sorted Source Nodes: [ct], Original ATen: [aten.new_zeros] # Source node to ATen node mapping: # ct => full # Graph fragment: # %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_zeros_3 = async_compile.triton('triton_poi_fused_new_zeros_3', 
''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_zeros_3(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sb/csb52ia7nfbux6ajj3tis46aqykq6p4giv3lsll3x4sellt3qcel.py # Topologically Sorted Source Nodes: [relu, r], Original ATen: [aten.relu, aten.div] # Source node to ATen node mapping: # r => div_2 # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %expand_1), kwargs = {}) triton_poi_fused_div_relu_4 = async_compile.triton('triton_poi_fused_div_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_relu_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tl_math.abs(tmp4) tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tl_math.abs(tmp7) tmp9 = tmp5 + tmp8 tmp11 = triton_helpers.maximum(tmp1, tmp10) tmp12 = tl_math.abs(tmp11) tmp13 = tmp9 + tmp12 tmp15 = triton_helpers.maximum(tmp1, tmp14) tmp16 = tl_math.abs(tmp15) tmp17 = tmp13 + tmp16 tmp18 = 1e-12 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = tmp2 / tmp19 tl.store(out_ptr0 + (x2), tmp20, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7c/c7clas5lw2nusjly52sgckjeygnor3o6xedozxfydrxogcc5ah6f.py # Topologically Sorted Source Nodes: [o, m_new, sub, ct_1, norm_1, add_2], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm] # Source node to ATen node mapping: # add_2 => add_2 # ct_1 => mul_1 # m_new => add_1 # norm_1 => abs_4, pow_8, sum_4 # o => sigmoid_1 # sub => sub # Graph fragment: # %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_2,), kwargs = {}) # %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %squeeze_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {}) # %mul_1 : [num_users=5] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %add_1), kwargs = {}) # %abs_4 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_1,), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_4, None), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 1.0), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_8, 1e-05), kwargs = {}) triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_out_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp6 * tmp2 tmp8 = tl_math.abs(tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tl.store(in_out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp2, None) tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp7, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ui/cuiig7hwoftgd73y5g4nxeusjygas4d43jgp6jstawnsxmuvtesk.py # Topologically Sorted Source Nodes: [features_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # features_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%select_1, %select_5, %div_3], -1), kwargs = {}) triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp15 = tl.load(in_ptr3 + (0)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 + (4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (16 + (4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/u6/cu64qvgmjs5gpnv6x7okvus6vtjdt7t7hrq67kwq7qihx4y6hhlw.py # Topologically Sorted Source Nodes: [o_1, m_new_1, sub_1, ct_2, norm_2, add_4], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm] # Source node to ATen node mapping: # add_4 => add_4 # ct_2 => mul_3 # m_new_1 => add_3 # norm_2 => abs_7, pow_14, sum_7 # o_1 => sigmoid_3 # sub_1 => sub_1 # Graph fragment: # %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_5,), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_2, %squeeze_3), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_3), kwargs = {}) # %mul_3 : [num_users=5] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %add_3), kwargs = {}) # %abs_7 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_3,), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_7, None), kwargs = {}) # %pow_14 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_7, 1.0), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_14, 1e-05), kwargs = {}) triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_out_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp6 * tmp2 tmp8 = tl_math.abs(tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tl.store(in_out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp2, None) tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp7, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tl/ctlao5dome6dvekazjdh4pzndckhm5j242u3aretn7zguljoglem.py # Topologically Sorted Source Nodes: [features_2], Original ATen: [aten.cat] # Source node to ATen node mapping: # features_2 => cat_2 # Graph fragment: # %cat_2 : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%select_2, %select_6, %div_6], -1), kwargs = {}) triton_poi_fused_cat_8 = async_compile.triton('triton_poi_fused_cat_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp15 = tl.load(in_ptr3 + (0)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 + (4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (32 + (4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5e/c5egga74hinxugpzzgffdhddgnaewjqkcaq6bjtgbdqsoepsqhjp.py # Topologically Sorted Source Nodes: [features_3], Original ATen: [aten.cat] # Source node to ATen node mapping: # features_3 => cat_3 # Graph fragment: # %cat_3 : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%select_3, %select_7, %div_9], -1), kwargs = {}) triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = 
(xindex // 12) x2 = xindex tmp15 = tl.load(in_ptr3 + (0)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (48 + (4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (48 + (4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/56/c56dlfmd6ph7xm4t43oba7ddqf5xjsaulzwau7eevrwffyi54oh6.py # Topologically Sorted Source Nodes: [m_new_3], Original ATen: [aten.add] # Source node to ATen node mapping: # m_new_3 => add_7 # Graph fragment: # %add_7 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_6, %squeeze_7), kwargs = {}) triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/en/cengjp6catznd72cpbumzf3yqxvgant3mspxbpqyimls3kme6rgd.py # Topologically Sorted Source Nodes: [m_out, c], Original ATen: [aten.stack] # Source node to ATen node mapping: # c => cat_5 # m_out => cat_4 # Graph fragment: # %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %mul_2, %mul_4, %mul_6],), 
kwargs = {}) # %cat_5 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul_1, %mul_3, %mul_5, %mul_7],), kwargs = {}) triton_poi_fused_stack_11 = async_compile.triton('triton_poi_fused_stack_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp8 = tmp6 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (4*((-4) + x1))), tmp14 & xmask, other=0.0) tmp16 = tl.sigmoid(tmp15) tmp17 = tl.load(in_ptr3 + (x0 + (4*((-4) + x1))), tmp14 & xmask, other=0.0) tmp18 = tmp16 * tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp14, tmp18, tmp19) tmp21 = tmp0 >= tmp12 tmp22 = tl.full([1], 12, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr4 + (x0 + (4*((-8) + x1))), tmp24 & xmask, other=0.0) tmp26 = tl.sigmoid(tmp25) tmp27 = tl.load(in_ptr5 + (x0 + (4*((-8) + x1))), tmp24 & xmask, other=0.0) tmp28 = tmp26 * tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp0 >= tmp22 tmp32 = tl.full([1], 16, tl.int64) tmp33 = tmp0 < tmp32 tmp34 = tl.load(in_ptr6 + (x0 + (4*((-12) + x1))), tmp31 & xmask, other=0.0) tmp35 = tl.sigmoid(tmp34) tmp36 = tl.load(in_ptr7 + (x0 + (4*((-12) + x1))), tmp31 & xmask, other=0.0) tmp37 = tmp35 * tmp36 
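    # Editorial note on the branch below (comment added for readability):
    # tmp35 = sigmoid(o) and tmp36 = m_new for the final time step, so
    # tmp37 = o * m_new is the outgoing-mass branch of cat_4 (m_out);
    # tmp47/tmp48 further down build (1 - o) * m_new for the matching
    # cat_5 (memory cell) branch.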
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype) tmp39 = tl.where(tmp31, tmp37, tmp38) tmp40 = tl.where(tmp24, tmp30, tmp39) tmp41 = tl.where(tmp14, tmp20, tmp40) tmp42 = tl.where(tmp4, tmp10, tmp41) tmp43 = tl.load(in_ptr8 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp44 = tl.load(in_ptr9 + (x0 + (4*((-4) + x1))), tmp14 & xmask, other=0.0) tmp45 = tl.load(in_ptr10 + (x0 + (4*((-8) + x1))), tmp24 & xmask, other=0.0) tmp46 = 1.0 tmp47 = tmp46 - tmp35 tmp48 = tmp47 * tmp36 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp31, tmp48, tmp49) tmp51 = tl.where(tmp24, tmp45, tmp50) tmp52 = tl.where(tmp14, tmp44, tmp51) tmp53 = tl.where(tmp4, tmp43, tmp52) tl.store(out_ptr0 + (x2), tmp42, xmask) tl.store(out_ptr1 + (x2), tmp53, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 12), (12, 1)) assert_size_stride(primals_4, (16, ), (1, )) assert_size_stride(primals_5, (16, 12), (12, 1)) assert_size_stride(primals_6, (16, ), (1, )) assert_size_stride(primals_7, (4, 12), (12, 1)) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32) buf3 = reinterpret_tensor(buf4, (4, 4), (12, 1), 8) # alias # Topologically Sorted Source Nodes: [ct, norm, add, truediv], Original ATen: [aten.new_zeros, aten.linalg_vector_norm, aten.add, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_new_zeros_0.run(buf3, 1, 16, grid=grid(1), stream=stream0) buf1 = reinterpret_tensor(buf4, (4, 4), (12, 1), 0) # alias buf2 = reinterpret_tensor(buf4, (4, 4), (12, 1), 4) # alias # Unsorted Source Nodes: [], Original ATen: [] triton_for_fused_1.run(primals_1, primals_2, buf1, buf2, grid=(2, 1, 1), stream=stream0) buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf4, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf5) buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf4, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, i], Original ATen: [aten.sigmoid, aten.div] triton_poi_fused_div_sigmoid_2.run(buf5, buf8, 64, grid=grid(64), stream=stream0) buf9 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, i, matmul], Original ATen: [aten.sigmoid, aten.div, aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), buf8, out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [ct], Original ATen: [aten.new_zeros] triton_poi_fused_new_zeros_3.run(buf10, 16, grid=grid(16), stream=stream0) buf11 = buf8; del buf8 # reuse # Topologically Sorted 
Source Nodes: [relu, r], Original ATen: [aten.relu, aten.div] triton_poi_fused_div_relu_4.run(buf6, buf11, 64, grid=grid(64), stream=stream0) buf12 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu, r, matmul_1], Original ATen: [aten.relu, aten.div, aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf10, (4, 1, 4), (4, 0, 1), 0), buf11, out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0); del buf12 # reuse buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf15 = empty_strided_cuda((), (), torch.float32) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [o, m_new, sub, ct_1, norm_1, add_2], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm] triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5.run(buf13, buf16, buf9, buf7, buf14, 1, 16, grid=grid(1), stream=stream0) buf17 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [features_1], Original ATen: [aten.cat] triton_poi_fused_cat_6.run(primals_1, primals_2, buf14, buf16, buf17, 48, grid=grid(48), stream=stream0) buf18 = reinterpret_tensor(buf11, (4, 16), (16, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf17, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf18) buf19 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf17, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf19) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu_1, r_1], Original ATen: [aten.relu, aten.div] triton_poi_fused_div_relu_4.run(buf19, buf20, 64, grid=grid(64), stream=stream0) buf21 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf17, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf21) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_2, i_1], Original ATen: [aten.sigmoid, aten.div] triton_poi_fused_div_sigmoid_2.run(buf18, buf22, 64, grid=grid(64), stream=stream0) buf23 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_2, i_1, matmul_2], Original ATen: [aten.sigmoid, aten.div, aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 16), buf22, out=buf23) buf24 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf14, (4, 1, 4), (4, 4, 1), 0), buf20, out=buf24) buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0); del buf23 # reuse buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf27 = empty_strided_cuda((), (), torch.float32) buf28 = buf27; del buf27 # reuse # Topologically Sorted Source Nodes: [o_1, m_new_1, sub_1, ct_2, norm_2, add_4], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm] triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7.run(buf25, buf28, buf24, buf21, buf26, 1, 16, grid=grid(1), stream=stream0) buf29 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [features_2], 
Original ATen: [aten.cat] triton_poi_fused_cat_8.run(primals_1, primals_2, buf26, buf28, buf29, 48, grid=grid(48), stream=stream0) buf30 = reinterpret_tensor(buf22, (4, 16), (16, 1), 0); del buf22 # reuse # Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf29, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf30) buf31 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf29, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf31) buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu_2, r_2], Original ATen: [aten.relu, aten.div] triton_poi_fused_div_relu_4.run(buf31, buf32, 64, grid=grid(64), stream=stream0) buf33 = reinterpret_tensor(buf24, (4, 4), (4, 1), 0); del buf24 # reuse # Topologically Sorted Source Nodes: [linear_8], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf29, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf33) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_4, i_2], Original ATen: [aten.sigmoid, aten.div] triton_poi_fused_div_sigmoid_2.run(buf30, buf34, 64, grid=grid(64), stream=stream0) buf35 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_4, i_2, matmul_4], Original ATen: [aten.sigmoid, aten.div, aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 32), buf34, out=buf35) buf36 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf26, (4, 1, 4), (4, 4, 1), 0), buf32, out=buf36) buf37 = reinterpret_tensor(buf35, (4, 4), (4, 1), 0); del buf35 # reuse buf38 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf39 = empty_strided_cuda((), (), torch.float32) buf40 = buf39; del buf39 # reuse # Topologically Sorted Source Nodes: [o_2, m_new_2, sub_2, ct_3, norm_3, add_6], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm] triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7.run(buf37, buf40, buf36, buf33, buf38, 1, 16, grid=grid(1), stream=stream0) buf41 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [features_3], Original ATen: [aten.cat] triton_poi_fused_cat_9.run(primals_1, primals_2, buf38, buf40, buf41, 48, grid=grid(48), stream=stream0) del primals_2 buf42 = reinterpret_tensor(buf34, (4, 16), (16, 1), 0); del buf34 # reuse # Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf41, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf42) del primals_4 buf43 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_10], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf41, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf43) del primals_6 buf44 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu_3, r_3], Original ATen: [aten.relu, aten.div] triton_poi_fused_div_relu_4.run(buf43, buf44, 64, grid=grid(64), stream=stream0) buf45 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0); del buf36 # reuse # 
Topologically Sorted Source Nodes: [linear_11], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf41, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf45) del primals_8 buf46 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_6, i_3], Original ATen: [aten.sigmoid, aten.div] triton_poi_fused_div_sigmoid_2.run(buf42, buf46, 64, grid=grid(64), stream=stream0) buf47 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_6, i_3, matmul_6], Original ATen: [aten.sigmoid, aten.div, aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 48), buf46, out=buf47) buf48 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf38, (4, 1, 4), (4, 4, 1), 0), buf44, out=buf48) buf49 = reinterpret_tensor(buf47, (4, 4), (4, 1), 0); del buf47 # reuse # Topologically Sorted Source Nodes: [m_new_3], Original ATen: [aten.add] triton_poi_fused_add_10.run(buf49, buf48, 16, grid=grid(16), stream=stream0) del buf48 buf50 = reinterpret_tensor(buf46, (16, 4), (4, 1), 0); del buf46 # reuse buf51 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [m_out, c], Original ATen: [aten.stack] triton_poi_fused_stack_11.run(buf7, buf13, buf21, buf25, buf33, buf37, buf45, buf49, buf14, buf26, buf38, buf50, buf51, 64, grid=grid(64), stream=stream0) return (reinterpret_tensor(buf50, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf51, (4, 4, 4), (16, 4, 1), 0), buf4, buf5, buf6, buf7, buf13, buf14, buf16, buf17, buf18, buf19, buf20, buf21, buf25, buf26, buf28, buf29, buf30, buf31, buf32, buf33, buf37, buf38, buf40, buf41, buf42, buf43, buf44, buf45, buf49, reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 48), primals_7, primals_5, primals_3, reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 32), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 16), reinterpret_tensor(buf10, (4, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
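# ---------------------------------------------------------------------------
# Reference sketch (an editorial addition, not part of the generated file):
# the fused kernels triton_poi_fused_div_sigmoid_2 and
# triton_poi_fused_div_relu_4 above both compute
# F.normalize(activation(h), p=1, dim=-1) on a (4, 4, 4) view of the gate
# pre-activations: apply the activation, then divide by the row-wise sum of
# absolute values clamped to 1e-12 (the `1e-12` / `maximum` pair in both
# kernels, matching torch.nn.functional.normalize's default eps).  The eager
# equivalent below can be used to sanity-check either kernel; `h` is an
# illustrative input, not taken from the compiled graph.
import torch
import torch.nn.functional as F


def normalized_gate_reference(h, activation):
    """Eager-mode mirror of the fused sigmoid/ReLU p=1 normalization."""
    act = torch.sigmoid(h) if activation == 'sigmoid' else torch.relu(h)
    denom = act.abs().sum(dim=-1, keepdim=True).clamp_min(1e-12)
    return act / denom


if __name__ == "__main__":
    h = torch.randn(4, 4, 4)
    for name, act in (('sigmoid', torch.sigmoid), ('relu', torch.relu)):
        ref = normalized_gate_reference(h, name)
        assert torch.allclose(ref, F.normalize(act(h), p=1, dim=-1))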
from _paritybench_helpers import _mock_config
import torch
from typing import Tuple
import torch.nn as nn


class _Gate(nn.Module):
    """Utility class to implement a standard sigmoid gate"""

    def __init__(self, in_features: 'int', out_features: 'int'):
        super(_Gate, self).__init__()
        self.fc = nn.Linear(in_features=in_features, out_features=out_features)
        self._reset_parameters()

    def _reset_parameters(self):
        nn.init.orthogonal_(self.fc.weight)
        nn.init.zeros_(self.fc.bias)

    def forward(self, x: 'torch.Tensor') -> torch.Tensor:
        """Perform a forward pass through the sigmoid gate"""
        return torch.sigmoid(self.fc(x))


class _NormalizedGate(nn.Module):
    """Utility class to implement a gate with normalised activation function"""

    def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]',
                 normalizer: 'str'):
        super(_NormalizedGate, self).__init__()
        self.fc = nn.Linear(in_features=in_features,
                            out_features=out_shape[0] * out_shape[1])
        self.out_shape = out_shape
        if normalizer == 'normalized_sigmoid':
            self.activation = nn.Sigmoid()
        elif normalizer == 'normalized_relu':
            self.activation = nn.ReLU()
        else:
            raise ValueError(
                f"Unknown normalizer {normalizer}. Must be one of "
                "'normalized_sigmoid', 'normalized_relu'")
        self._reset_parameters()

    def _reset_parameters(self):
        nn.init.orthogonal_(self.fc.weight)
        nn.init.zeros_(self.fc.bias)

    def forward(self, x: 'torch.Tensor') -> torch.Tensor:
        """Perform a forward pass through the normalized gate"""
        h = self.fc(x).view(-1, *self.out_shape)
        return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1)


class _MCLSTMCell(nn.Module):
    """The logic of the MC-LSTM cell"""

    def __init__(self, mass_input_size: 'int', aux_input_size: 'int',
                 hidden_size: 'int', cfg: 'Config'):
        super(_MCLSTMCell, self).__init__()
        self.cfg = cfg
        self._hidden_size = hidden_size
        gate_inputs = aux_input_size + hidden_size + mass_input_size
        self.output_gate = _Gate(in_features=gate_inputs,
                                 out_features=hidden_size)
        self.input_gate = _NormalizedGate(in_features=gate_inputs,
                                          out_shape=(mass_input_size, hidden_size),
                                          normalizer='normalized_sigmoid')
        self.redistribution = _NormalizedGate(in_features=gate_inputs,
                                              out_shape=(hidden_size, hidden_size),
                                              normalizer='normalized_relu')
        self._reset_parameters()

    def _reset_parameters(self):
        if self.cfg.initial_forget_bias is not None:
            nn.init.constant_(self.output_gate.fc.bias,
                              val=self.cfg.initial_forget_bias)

    def forward(self, x_m: 'torch.Tensor', x_a: 'torch.Tensor') -> Tuple[
            torch.Tensor, torch.Tensor]:
        """Perform a forward pass on the MC-LSTM cell.

        Parameters
        ----------
        x_m : torch.Tensor
            Mass input that will be conserved by the network.
        x_a : torch.Tensor
            Auxiliary inputs that are used to modulate the gates but whose
            information is not stored internally in the MC-LSTM cells.

        Returns
        -------
        Tuple[torch.Tensor, torch.Tensor]
            Outgoing mass and memory cells per time step, each of shape
            [sequence length, batch size, hidden size].
        """
        _, batch_size, _ = x_m.size()
        ct = x_m.new_zeros((batch_size, self._hidden_size))
        m_out, c = [], []
        for xt_m, xt_a in zip(x_m, x_a):
            mt_out, ct = self._step(xt_m, xt_a, ct)
            m_out.append(mt_out)
            c.append(ct)
        m_out, c = torch.stack(m_out), torch.stack(c)
        return m_out, c

    def _step(self, xt_m, xt_a, c):
        """Make a single time step in the MC-LSTM."""
        features = torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1)
        i = self.input_gate(features)
        r = self.redistribution(features)
        o = self.output_gate(features)
        m_in = torch.matmul(xt_m.unsqueeze(-2), i).squeeze(-2)
        m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2)
        m_new = m_in + m_sys
        return o * m_new, (1 - o) * m_new


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'mass_input_size': 4, 'aux_input_size': 4, 'hidden_size':
        4, 'cfg': _mock_config(initial_forget_bias=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from typing import Tuple import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_linalg_vector_norm_new_zeros_0(out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 4 r2 = rindex // 4 tmp0 = 0.0 tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 1e-05 tmp5 = tmp3 + tmp4 tmp6 = tmp0 / tmp5 tl.store(out_ptr1 + tl.broadcast_to(r1 + 12 * r2, [XBLOCK, RBLOCK]), tmp6, None) @triton.jit def triton_for_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1): pid = tl.program_id(0) XBLOCK: tl.constexpr = 1024 num_xblocks_0 = tl.cdiv(16, XBLOCK) num_xblocks_1 = num_xblocks_0 + tl.cdiv(16, XBLOCK) if pid < num_xblocks_0: pid_offset = pid xnumel = 16 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask) elif pid < num_xblocks_1: pid_offset = pid - num_xblocks_0 xnumel = 16 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x5 = xindex x3 = xindex % 4 x4 = xindex // 4 tmp1 = tl.load(in_ptr1 + x5, xmask) tl.store(out_ptr1 + (x3 + 12 * x4), tmp1, xmask) else: pass @triton.jit def triton_poi_fused_div_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.sigmoid(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tl_math.abs(tmp3) tmp6 = tl.sigmoid(tmp5) tmp7 = tl_math.abs(tmp6) tmp8 = tmp4 + tmp7 tmp10 = tl.sigmoid(tmp9) tmp11 = tl_math.abs(tmp10) tmp12 = tmp8 + tmp11 tmp14 = tl.sigmoid(tmp13) tmp15 = tl_math.abs(tmp14) tmp16 = tmp12 + tmp15 tmp17 = 1e-12 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tmp1 / tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) @triton.jit def triton_poi_fused_new_zeros_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_div_relu_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tl_math.abs(tmp4) tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tl_math.abs(tmp7) tmp9 = tmp5 + tmp8 tmp11 = triton_helpers.maximum(tmp1, tmp10) tmp12 = tl_math.abs(tmp11) tmp13 = tmp9 + tmp12 tmp15 = triton_helpers.maximum(tmp1, tmp14) tmp16 = tl_math.abs(tmp15) tmp17 = tmp13 + tmp16 tmp18 = 1e-12 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = tmp2 / tmp19 tl.store(out_ptr0 + x2, tmp20, xmask) @triton.jit def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_out_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp6 * tmp2 tmp8 = tl_math.abs(tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp2, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp15 = tl.load(in_ptr3 + 0) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 + 4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (16 + 4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_out_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp6 * tmp2 tmp8 = tl_math.abs(tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp2, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None) @triton.jit def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp15 = tl.load(in_ptr3 + 0) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 + 4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (32 + 4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp15 = tl.load(in_ptr3 + 0) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (48 + 4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (48 + 4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_stack_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, out_ptr1, 
xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp4 & xmask, other=0.0) tmp8 = tmp6 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0) tmp16 = tl.sigmoid(tmp15) tmp17 = tl.load(in_ptr3 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0) tmp18 = tmp16 * tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp14, tmp18, tmp19) tmp21 = tmp0 >= tmp12 tmp22 = tl.full([1], 12, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr4 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0) tmp26 = tl.sigmoid(tmp25) tmp27 = tl.load(in_ptr5 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0) tmp28 = tmp26 * tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp0 >= tmp22 tl.full([1], 16, tl.int64) tmp34 = tl.load(in_ptr6 + (x0 + 4 * (-12 + x1)), tmp31 & xmask, other=0.0) tmp35 = tl.sigmoid(tmp34) tmp36 = tl.load(in_ptr7 + (x0 + 4 * (-12 + x1)), tmp31 & xmask, other=0.0) tmp37 = tmp35 * tmp36 tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype) tmp39 = tl.where(tmp31, tmp37, tmp38) tmp40 = tl.where(tmp24, tmp30, tmp39) tmp41 = tl.where(tmp14, tmp20, tmp40) tmp42 = tl.where(tmp4, tmp10, tmp41) tmp43 = tl.load(in_ptr8 + (x0 + 4 * x1), tmp4 & xmask, other=0.0) tmp44 = tl.load(in_ptr9 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0) tmp45 = tl.load(in_ptr10 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0) tmp46 = 1.0 tmp47 = tmp46 - tmp35 tmp48 = tmp47 * tmp36 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp31, tmp48, tmp49) tmp51 = tl.where(tmp24, tmp45, tmp50) tmp52 = tl.where(tmp14, tmp44, tmp51) tmp53 = tl.where(tmp4, tmp43, tmp52) tl.store(out_ptr0 + x2, tmp42, xmask) tl.store(out_ptr1 + x2, tmp53, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 12), (12, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (16, 12), (12, 1)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (4, 12), (12, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32) buf3 = reinterpret_tensor(buf4, (4, 4), (12, 1), 8) get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_new_zeros_0[grid(1)](buf3, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf4, (4, 4), (12, 1), 0) buf2 = reinterpret_tensor(buf4, (4, 4), (12, 1), 4) triton_for_fused_1[2, 1, 1](primals_1, primals_2, buf1, buf2, num_warps=8, num_stages=1) buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_4, buf4, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf5) buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_6, buf4, 
reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_sigmoid_2[grid(64)](buf5, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), buf8, out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_new_zeros_3[grid(16)](buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = buf8 del buf8 triton_poi_fused_div_relu_4[grid(64)](buf6, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf10, (4, 1, 4), (4, 0, 1), 0), buf11, out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0) del buf12 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf15 = empty_strided_cuda((), (), torch.float32) buf16 = buf15 del buf15 triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5[grid(1)]( buf13, buf16, buf9, buf7, buf14, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf17 = empty_strided_cuda((4, 12), (12, 1), torch.float32) triton_poi_fused_cat_6[grid(48)](primals_1, primals_2, buf14, buf16, buf17, 48, XBLOCK=64, num_warps=1, num_stages=1) buf18 = reinterpret_tensor(buf11, (4, 16), (16, 1), 0) del buf11 extern_kernels.addmm(primals_4, buf17, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf18) buf19 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_6, buf17, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf19) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_relu_4[grid(64)](buf19, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1) buf21 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_8, buf17, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf21) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_sigmoid_2[grid(64)](buf18, buf22, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 16), buf22, out=buf23) buf24 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf14, (4, 1, 4), (4, 4, 1), 0), buf20, out=buf24) buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0) del buf23 buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf27 = empty_strided_cuda((), (), torch.float32) buf28 = buf27 del buf27 triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7[grid(1)]( buf25, buf28, buf24, buf21, buf26, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf29 = empty_strided_cuda((4, 12), (12, 1), torch.float32) triton_poi_fused_cat_8[grid(48)](primals_1, primals_2, buf26, buf28, buf29, 48, XBLOCK=64, num_warps=1, num_stages=1) buf30 = reinterpret_tensor(buf22, (4, 16), (16, 1), 0) del buf22 extern_kernels.addmm(primals_4, buf29, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf30) buf31 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_6, buf29, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, 
out=buf31) buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_relu_4[grid(64)](buf31, buf32, 64, XBLOCK=64, num_warps=1, num_stages=1) buf33 = reinterpret_tensor(buf24, (4, 4), (4, 1), 0) del buf24 extern_kernels.addmm(primals_8, buf29, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf33) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_sigmoid_2[grid(64)](buf30, buf34, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf35 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 32), buf34, out=buf35) buf36 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf26, (4, 1, 4), (4, 4, 1), 0), buf32, out=buf36) buf37 = reinterpret_tensor(buf35, (4, 4), (4, 1), 0) del buf35 buf38 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf39 = empty_strided_cuda((), (), torch.float32) buf40 = buf39 del buf39 triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7[grid(1)]( buf37, buf40, buf36, buf33, buf38, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf41 = empty_strided_cuda((4, 12), (12, 1), torch.float32) triton_poi_fused_cat_9[grid(48)](primals_1, primals_2, buf38, buf40, buf41, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf42 = reinterpret_tensor(buf34, (4, 16), (16, 1), 0) del buf34 extern_kernels.addmm(primals_4, buf41, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf42) del primals_4 buf43 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_6, buf41, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf43) del primals_6 buf44 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_relu_4[grid(64)](buf43, buf44, 64, XBLOCK=64, num_warps=1, num_stages=1) buf45 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0) del buf36 extern_kernels.addmm(primals_8, buf41, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf45) del primals_8 buf46 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_sigmoid_2[grid(64)](buf42, buf46, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf47 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 48), buf46, out=buf47) buf48 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf38, (4, 1, 4), (4, 4, 1), 0), buf44, out=buf48) buf49 = reinterpret_tensor(buf47, (4, 4), (4, 1), 0) del buf47 triton_poi_fused_add_10[grid(16)](buf49, buf48, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf48 buf50 = reinterpret_tensor(buf46, (16, 4), (4, 1), 0) del buf46 buf51 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_stack_11[grid(64)](buf7, buf13, buf21, buf25, buf33, buf37, buf45, buf49, buf14, buf26, buf38, buf50, buf51, 64, XBLOCK=64, num_warps=1, num_stages=1) return (reinterpret_tensor(buf50, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf51, (4, 4, 4), (16, 4, 1), 0), buf4, buf5, buf6, buf7, buf13, buf14, buf16, buf17, buf18, buf19, buf20, buf21, buf25, buf26, buf28, buf29, buf30, buf31, buf32, buf33, buf37, buf38, buf40, buf41, buf42, buf43, buf44, buf45, buf49, reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 48), primals_7, primals_5, primals_3, reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 32), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 16), 
reinterpret_tensor(buf10, (4, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 0)) class _Gate(nn.Module): """Utility class to implement a standard sigmoid gate""" def __init__(self, in_features: 'int', out_features: 'int'): super(_Gate, self).__init__() self.fc = nn.Linear(in_features=in_features, out_features=out_features) self._reset_parameters() def _reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def forward(self, x: 'torch.Tensor') ->torch.Tensor: """Perform forward pass through the normalised gate""" return torch.sigmoid(self.fc(x)) class _NormalizedGate(nn.Module): """Utility class to implement a gate with normalised activation function""" def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]', normalizer: 'str'): super(_NormalizedGate, self).__init__() self.fc = nn.Linear(in_features=in_features, out_features=out_shape [0] * out_shape[1]) self.out_shape = out_shape if normalizer == 'normalized_sigmoid': self.activation = nn.Sigmoid() elif normalizer == 'normalized_relu': self.activation = nn.ReLU() else: raise ValueError( f"Unknown normalizer {normalizer}. Must be one of {'normalized_sigmoid', 'normalized_relu'}" ) self._reset_parameters() def _reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def forward(self, x: 'torch.Tensor') ->torch.Tensor: """Perform forward pass through the normalized gate""" h = self.fc(x).view(-1, *self.out_shape) return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1) class _MCLSTMCellNew(nn.Module): """The logic of the MC-LSTM cell""" def __init__(self, mass_input_size: 'int', aux_input_size: 'int', hidden_size: 'int', cfg: 'Config'): super(_MCLSTMCellNew, self).__init__() self.cfg = cfg self._hidden_size = hidden_size gate_inputs = aux_input_size + hidden_size + mass_input_size self.output_gate = _Gate(in_features=gate_inputs, out_features= hidden_size) self.input_gate = _NormalizedGate(in_features=gate_inputs, out_shape=(mass_input_size, hidden_size), normalizer= 'normalized_sigmoid') self.redistribution = _NormalizedGate(in_features=gate_inputs, out_shape=(hidden_size, hidden_size), normalizer='normalized_relu') self._reset_parameters() def _reset_parameters(self): if self.cfg.initial_forget_bias is not None: nn.init.constant_(self.output_gate.fc.bias, val=self.cfg. initial_forget_bias) def _step(self, xt_m, xt_a, c): """ Make a single time step in the MCLSTM. """ features = torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1) i = self.input_gate(features) r = self.redistribution(features) o = self.output_gate(features) m_in = torch.matmul(xt_m.unsqueeze(-2), i).squeeze(-2) m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2) m_new = m_in + m_sys return o * m_new, (1 - o) * m_new def forward(self, input_0, input_1): primals_7 = self.output_gate.fc.weight primals_8 = self.output_gate.fc.bias primals_3 = self.input_gate.fc.weight primals_4 = self.input_gate.fc.bias primals_5 = self.redistribution.fc.weight primals_6 = self.redistribution.fc.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
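A minimal usage sketch for the compiled cell above. The Config stand-in is an assumption made for illustration (only the single attribute read in _reset_parameters is supplied); the real class lives in neuralhydrology and may carry more fields. A CUDA device is required, since call() pins cuda:0.

import types
import torch

# Hypothetical stand-in for neuralhydrology's Config -- only the attribute
# the cell reads in _reset_parameters is provided.
cfg = types.SimpleNamespace(initial_forget_bias=3.0)

cell = _MCLSTMCellNew(mass_input_size=4, aux_input_size=4, hidden_size=4,
                      cfg=cfg).cuda()

x_m = torch.rand(4, 4, 4, device='cuda')  # [seq_len, batch, mass inputs]
x_a = torch.rand(4, 4, 4, device='cuda')  # [seq_len, batch, aux inputs]
m_out, c = cell(x_m, x_a)                 # per-step outgoing mass and retained mass
print(m_out.shape, c.shape)               # torch.Size([4, 4, 4]) twice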
DavidChoi76/neuralhydrology
_MCLSTMCell
false
15216
[ "BSD-3-Clause" ]
144
a4c284b92934ee973c8b3fedf8a60df60c8feae1
https://github.com/DavidChoi76/neuralhydrology/tree/a4c284b92934ee973c8b3fedf8a60df60c8feae1
GatedResidualNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ck/cck6zsxedo53nyj2po2pvkfjvrr75ansuu3rjjhu6zyrx6xzssqo.py # Topologically Sorted Source Nodes: [n2], Original ATen: [aten.elu] # Source node to ATen node mapping: # n2 => expm1, gt, mul, mul_2, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/en/cenabni3h77zazokaukc6skf7kuv4ta2awact6sfvxbq7che2ucu.py # Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # grn => var_mean # sigmoid => sigmoid # x_1 => mul_3 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_7), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_3), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_mul_native_layer_norm_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (1 + 
(4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp8 = tl.sigmoid(tmp7) tmp10 = tmp8 * tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp15 = tl.sigmoid(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp22 = tl.sigmoid(tmp21) tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uj/cuj7uutcqmly235uixsc4lxbmmowhsqrjmbzktxbwhqmb7hgix5q.py # Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # grn => add_1, add_2, mul_4, mul_5, rsqrt, sub # sigmoid => sigmoid # x_1 => mul_3 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_7), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_10), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_11), kwargs = {}) triton_poi_fused_add_mul_native_layer_norm_sigmoid_2 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [n2], Original ATen: [aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_elu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [n1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf2, 
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_9 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_sigmoid_1.run(primals_3, buf3, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_sigmoid_2.run(primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7, 256, grid=grid(256), stream=stream0) del buf5 del buf6 del primals_11 return (buf7, primals_3, primals_10, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, buf3, buf4, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch.nn import functional as F from torch import nn from torchvision import models as models import torch.onnx import torch.nn class GatedLinearUnit(nn.Module): def __init__(self, input_size, output_size, dropout=0): super().__init__() self.dropout = nn.Dropout(dropout) self.w4 = nn.Linear(input_size, output_size) self.w5 = nn.Linear(input_size, output_size) self.act = nn.Sigmoid() def forward(self, x): x = self.dropout(x) x = self.act(self.w4(x)) * self.w5(x) return x class GatedResidualNetwork(nn.Module): def __init__(self, input_size, hidden_size, output_size, context_size= None, dropout=0): super().__init__() self.w1 = nn.Linear(hidden_size, hidden_size) self.w2 = nn.Linear(input_size, hidden_size) self.w3 = None if context_size is None else nn.Linear(context_size, hidden_size, bias=False) self.glu = GatedLinearUnit(hidden_size, output_size, dropout) self.layer_norm = nn.LayerNorm(output_size) self.residual = nn.Sequential( ) if input_size == output_size else nn.Linear(input_size, output_size) def forward(self, a, c=None): if c is not None: n2 = F.elu(self.w2(a) + self.w3(c)) else: n2 = F.elu(self.w2(a)) n1 = self.w1(n2) grn = self.layer_norm(self.residual(a) + self.glu(n1)) return grn def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
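A short usage sketch for the eager modules above, exercising the optional context path; shapes follow get_inputs() and get_init_inputs(), and the context_size argument is the only addition.

grn = GatedResidualNetwork(input_size=4, hidden_size=4, output_size=4,
                           context_size=4)
a = torch.rand(4, 4, 4, 4)
c = torch.rand(4, 4, 4, 4)
out = grn(a, c)      # w3 projects the context, added to w2(a) before the ELU
print(out.shape)     # torch.Size([4, 4, 4, 4])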
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tl.sigmoid(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp8 = tl.sigmoid(tmp7) tmp10 = tmp8 * tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp15 = tl.sigmoid(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp22 = tl.sigmoid(tmp21) tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x2, xmask) tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp15 
= tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_9 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(64)]( primals_3, buf3, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_sigmoid_2[grid(256)]( primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 del primals_11 return buf7, primals_3, primals_10, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, buf3, buf4, primals_8, primals_6, primals_4 class GatedLinearUnit(nn.Module): def __init__(self, input_size, output_size, dropout=0): super().__init__() self.dropout = nn.Dropout(dropout) self.w4 = nn.Linear(input_size, output_size) self.w5 = nn.Linear(input_size, output_size) self.act = nn.Sigmoid() def forward(self, x): x = self.dropout(x) x = self.act(self.w4(x)) * self.w5(x) return x class GatedResidualNetworkNew(nn.Module): def __init__(self, input_size, hidden_size, output_size, context_size= None, dropout=0): super().__init__() self.w1 = nn.Linear(hidden_size, hidden_size) self.w2 = nn.Linear(input_size, hidden_size) self.w3 = None if context_size is None else nn.Linear(context_size, hidden_size, bias=False) self.glu = GatedLinearUnit(hidden_size, output_size, dropout) self.layer_norm = nn.LayerNorm(output_size) self.residual = 
nn.Sequential( ) if input_size == output_size else nn.Linear(input_size, output_size) def forward(self, input_0): primals_1 = self.w1.weight primals_2 = self.w1.bias primals_4 = self.w2.weight primals_5 = self.w2.bias primals_6 = self.glu.w4.weight primals_7 = self.glu.w4.bias primals_8 = self.glu.w5.weight primals_9 = self.glu.w5.bias primals_10 = self.layer_norm.weight primals_11 = self.layer_norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
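A hedged sanity check, assuming both classes are in scope: because the two modules declare identical parameter names, the compiled wrapper can share the eager module's weights, and the fused kernels should then agree with eager execution up to floating-point tolerance. Needs a CUDA device, since call() pins cuda:0.

torch.manual_seed(0)
eager = GatedResidualNetwork(4, 4, 4).cuda().eval()
fused = GatedResidualNetworkNew(4, 4, 4).cuda().eval()
fused.load_state_dict(eager.state_dict())  # same parameter names in both classes
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), eager(x), rtol=1e-5, atol=1e-5)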
dqawami/openvino_training_extensions
GatedResidualNetwork
false
15217
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
SpatialAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r3/cr3jyybnsmycovxduh7msitibikymwdrwokyrxcb4r43tskaegg7.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %permute_3), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = 
triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 + tmp0 tmp8 = tmp3 + tmp7 tmp9 = tmp5 + tmp8 tmp10 = 0.25 tmp11 = tmp9 * tmp10 tmp12 = tmp6 + tmp11 tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/go/cgofqcgduqrtcjakfd7uk3wkcrpwsqxispluihwsstry6ekodk2u.py # Topologically Sorted Source Nodes: [convolved, out], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # convolved => convolution # out => sigmoid # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %primals_2, %primals_3, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 7, 7), (49, 49, 7, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [convolved], Original ATen: [aten.convolution] 
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 1, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [convolved, out], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 return (buf2, primals_2, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 7, 7), (49, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torchvision import models as models import torch.onnx import torch.nn class SpatialAttention(nn.Module): def __init__(self, in_channels): super().__init__() self.activation = nn.Sigmoid() self.maxpool = nn.MaxPool2d((1, in_channels)) self.avgpool = nn.AvgPool2d((1, in_channels)) self.conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=7, padding=3) def forward(self, x): maxpool = self.maxpool(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) avgpool = self.avgpool(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) convolved = self.conv(maxpool + avgpool) out = self.activation(convolved) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
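A brief usage sketch for the module above. The pooling layers act over the channel axis via the permutes, so the result is a single-channel spatial mask; multiplying the input by it is a typical use assumed here for illustration, not something the class itself does.

attn = SpatialAttention(in_channels=4)
x = torch.rand(4, 4, 4, 4)   # [batch, channels, height, width]
mask = attn(x)               # [batch, 1, height, width], values in (0, 1)
gated = x * mask             # broadcasts over the channel dimension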
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 + tmp0 tmp8 = tmp3 + tmp7 tmp9 = tmp5 + tmp8 tmp10 = 0.25 tmp11 = tmp9 * tmp10 tmp12 = tmp6 + tmp11 tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 7, 7), (49, 49, 7, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 1, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_sigmoid_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class SpatialAttentionNew(nn.Module): def __init__(self, in_channels): super().__init__() self.activation = nn.Sigmoid() self.maxpool = nn.MaxPool2d((1, in_channels)) self.avgpool = nn.AvgPool2d((1, in_channels)) self.conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=7, padding=3) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dqawami/openvino_training_extensions
SpatialAttention
false
15218
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat] # Source node to ATen node mapping: # xu => cat # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ac/cacdwifxdru2eihx3n66wqfym5hjpdo6yxk3gsol5t54xroplkwv.py # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x1 => relu # Graph fragment: # %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vp/cvpyw2bjgo55x2ne47dmpi2vmqu5t4eb3wcvjgjcnove3xyj7bcr.py # Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x1_1 => relu_1 # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_6), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (400, 8), (8, 1)) assert_size_stride(primals_4, (400, ), (1, )) assert_size_stride(primals_5, (300, 400), (400, 1)) assert_size_stride(primals_6, (300, ), (1, )) assert_size_stride(primals_7, (1, 300), (300, 1)) assert_size_stride(primals_8, (1, ), (1, )) assert_size_stride(primals_9, (400, 8), (8, 1)) assert_size_stride(primals_10, (400, ), (1, )) assert_size_stride(primals_11, (300, 400), (400, 1)) assert_size_stride(primals_12, (300, ), (1, )) assert_size_stride(primals_13, (1, 300), (300, 1)) assert_size_stride(primals_14, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 1600, grid=grid(1600), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, 
reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf4, primals_6, 1200, grid=grid(1200), stream=stream0) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x1_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_8 buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf7) del primals_9 buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf8, primals_10, 1600, grid=grid(1600), stream=stream0) del primals_10 buf9 = empty_strided_cuda((4, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf9) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [x2_1], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf10, primals_12, 1200, grid=grid(1200), stream=stream0) del primals_12 buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x2_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(primals_13, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf12) del primals_14 return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13, primals_11, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((400, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((400, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((1, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.nn.functional as F class Critic(nn.Module): def __init__(self, state_dim, action_dim): super(Critic, self).__init__() self.l1 = nn.Linear(state_dim + action_dim, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, 1) self.l4 = nn.Linear(state_dim + action_dim, 400) self.l5 = nn.Linear(400, 300) self.l6 = nn.Linear(300, 1) def forward(self, x, u): xu = torch.cat([x, u], 1) x1 = F.relu(self.l1(xu)) x1 = F.relu(self.l2(x1)) x1 = self.l3(x1) x2 = F.relu(self.l4(xu)) x2 = F.relu(self.l5(x2)) x2 = self.l6(x2) return x1, x2 def Q1(self, x, u): xu = torch.cat([x, u], 1) x1 = F.relu(self.l1(xu)) x1 = F.relu(self.l2(x1)) x1 = self.l3(x1) return x1 def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (400, 8), (8, 1)) assert_size_stride(primals_4, (400,), (1,)) assert_size_stride(primals_5, (300, 400), (400, 1)) assert_size_stride(primals_6, (300,), (1,)) assert_size_stride(primals_7, (1, 300), (300, 1)) assert_size_stride(primals_8, (1,), (1,)) assert_size_stride(primals_9, (400, 8), (8, 1)) assert_size_stride(primals_10, (400,), (1,)) assert_size_stride(primals_11, (300, 400), (400, 1)) assert_size_stride(primals_12, (300,), (1,)) assert_size_stride(primals_13, (1, 300), (300, 1)) assert_size_stride(primals_14, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(1600)](buf2, 
primals_4, 1600, XBLOCK= 256, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (400, 300), ( 1, 400), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_2[grid(1200)](buf4, primals_6, 1200, XBLOCK= 256, num_warps=4, num_stages=1) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_8 buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf7) del primals_9 buf8 = buf7 del buf7 triton_poi_fused_relu_1[grid(1600)](buf8, primals_10, 1600, XBLOCK= 256, num_warps=4, num_stages=1) del primals_10 buf9 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf9) buf10 = buf9 del buf9 triton_poi_fused_relu_2[grid(1200)](buf10, primals_12, 1200, XBLOCK =256, num_warps=4, num_stages=1) del primals_12 buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_14, buf10, reinterpret_tensor( primals_13, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf12) del primals_14 return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13, primals_11, primals_7, primals_5) class CriticNew(nn.Module): def __init__(self, state_dim, action_dim): super(CriticNew, self).__init__() self.l1 = nn.Linear(state_dim + action_dim, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, 1) self.l4 = nn.Linear(state_dim + action_dim, 400) self.l5 = nn.Linear(400, 300) self.l6 = nn.Linear(300, 1) def Q1(self, x, u): xu = torch.cat([x, u], 1) x1 = F.relu(self.l1(xu)) x1 = F.relu(self.l2(x1)) x1 = self.l3(x1) return x1 def forward(self, input_0, input_1): primals_3 = self.l1.weight primals_4 = self.l1.bias primals_5 = self.l2.weight primals_6 = self.l2.bias primals_7 = self.l3.weight primals_8 = self.l3.bias primals_9 = self.l4.weight primals_10 = self.l4.bias primals_11 = self.l5.weight primals_12 = self.l5.bias primals_13 = self.l6.weight primals_14 = self.l6.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0], output[1]
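# A minimal usage sketch for the CriticNew module defined above. This is an
# illustrative assumption, not part of the generated artifact: it presumes a
# CUDA device is available and reuses the shapes from get_inputs() /
# get_init_inputs() (state_dim=4, action_dim=4, batch of 4).
import torch

if __name__ == "__main__":
    critic = CriticNew(state_dim=4, action_dim=4).cuda()
    state = torch.rand(4, 4, device="cuda")
    action = torch.rand(4, 4, device="cuda")
    q1, q2 = critic(state, action)      # twin Q heads via the Triton path, each (4, 1)
    q1_ref = critic.Q1(state, action)   # eager-mode first head, kept for comparison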
doudoulaile/RL-GAN-Net
Critic
false
15,219
[ "MIT" ]
112
9c221223d1878bc24f0f39ad34928c1bb2974ae3
https://github.com/doudoulaile/RL-GAN-Net/tree/9c221223d1878bc24f0f39ad34928c1bb2974ae3
SmallBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.relu] # Source node to ATen node mapping: # output => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/3g/c3gulbvr4xrfq3wps6kqjc3yuakrgtdcdvb44tmfrvggj56xwcm6.py # Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.relu] # Source node to ATen node mapping: # output_2 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/y4/cy4ywivrvoulzmyoy5vjymbnro5whqtv6677rwbojlx53jirk7ab.py # Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.add] # Source node to ATen node mapping: # output_4 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {}) triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.add] triton_poi_fused_add_2.run(buf4, primals_1, 256, grid=grid(256), stream=stream0) del primals_1 return (buf4, primals_2, primals_3, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torchvision import models as models import torch.onnx import torch.nn class SmallBlock(nn.Module): def __init__(self, channels): super(SmallBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=3, stride=1, padding=1, bias=False) self.relu = nn.ReLU(inplace=False) self.conv2 = nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=3, stride=1, padding=1, bias=False) def forward(self, x): identity_data = x output = self.relu(x) output = self.conv1(output) output = self.relu(output) output = self.conv2(output) output = torch.add(output, identity_data) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(256)](buf2, 256, XBLOCK=256, num_warps =4, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_add_2[grid(256)](buf4, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf4, primals_2, primals_3, buf0, buf2 class SmallBlockNew(nn.Module): def __init__(self, channels): super(SmallBlockNew, self).__init__() self.conv1 = nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=3, stride=1, padding=1, bias=False) self.relu = nn.ReLU(inplace=False) self.conv2 = nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=3, stride=1, padding=1, bias=False) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
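# A minimal usage sketch for the SmallBlockNew module defined above; an
# assumed example (CUDA device available, input shape taken from get_inputs()).
import torch

if __name__ == "__main__":
    block = SmallBlockNew(channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    y = block(x)  # relu -> conv1 -> relu -> conv2 -> residual add with x
    assert y.shape == x.shape  # the residual connection preserves the shape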
dqawami/openvino_training_extensions
SmallBlock
false
15,220
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
ResBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ye/cye7l2jaf362rrj43bugwtiqncxa3xnlfse2dg7bg4rqz2wqm2ew.py # Topologically Sorted Source Nodes: [instance_norm, output], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.relu] # Source node to ATen node mapping: # instance_norm => add, repeat, rsqrt, var_mean # output => relu # Graph fragment: # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_3, [4]), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) triton_per_fused__native_batch_norm_legit_relu_repeat_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_repeat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_relu_repeat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r1 = rindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.sum(tmp7, 1)[:, None] tmp9 = tl.full([XBLOCK, 1], 16, tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 / tmp10 tmp12 = tmp2 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tmp1 - tmp11 tmp19 = 16.0 tmp20 = tmp17 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp24 = tmp18 * tmp23 tmp25 = tmp24 * tmp0 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr0 + (x0), tmp0, xmask) tl.store(out_ptr3 + (r1 + (16*x0)), tmp29, xmask) tl.store(out_ptr4 + (x0), tmp23, xmask) tl.store(out_ptr1 + (x0), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/i2/ci2jmbrazm5naptlijw4vyjquzzztqfkwcf67vgpuwbsxa3llhgy.py # Topologically Sorted Source Nodes: [output_1, output_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.add] # Source node to ATen node mapping: # output_1 => add_2, repeat_2, rsqrt_1, var_mean_1 # output_2 => add_4 # Graph fragment: # %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_6, [4]), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, %primals_1), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_repeat_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_repeat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_repeat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r1 = rindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (r1 + (16*x0)), xmask, other=0.0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.sum(tmp7, 1)[:, None] tmp9 = tl.full([XBLOCK, 1], 16, tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 / tmp10 tmp12 = tmp2 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tmp1 - tmp11 tmp19 = 16.0 tmp20 = tmp17 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp24 = tmp18 * tmp23 tmp25 = tmp24 * tmp0 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tl.store(out_ptr0 + (x0), tmp0, xmask) tl.store(out_ptr3 + (r1 + (16*x0)), tmp29, xmask) tl.store(out_ptr4 + (x0), tmp23, xmask) tl.store(out_ptr1 + (x0), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((16, ), 
(1, ), torch.float32) buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [instance_norm, output], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.relu] stream0 = get_raw_stream(0) triton_per_fused__native_batch_norm_legit_relu_repeat_0.run(primals_3, buf0, primals_4, buf1, buf2, buf6, buf5, 16, 16, grid=grid(16), stream=stream0) del primals_3 del primals_4 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = empty_strided_cuda((16, ), (1, ), torch.float32) buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [output_1, output_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.add] triton_per_fused__native_batch_norm_legit_add_repeat_1.run(primals_6, buf7, primals_7, primals_1, buf8, buf9, buf13, buf12, 16, 16, grid=grid(16), stream=stream0) del primals_6 del primals_7 return (buf13, primals_1, primals_2, primals_5, buf0, buf1, reinterpret_tensor(buf5, (16, ), (1, ), 0), buf6, buf7, buf8, reinterpret_tensor(buf12, (16, ), (1, ), 0), reinterpret_tensor(buf9, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torchvision import models as models import torch.onnx import torch.nn class ResBlock(nn.Module): def __init__(self, num_of_channels): super(ResBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels=num_of_channels, out_channels= num_of_channels, kernel_size=3, stride=1, padding=1, bias=False) self.in1 = nn.InstanceNorm2d(num_of_channels, affine=True) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(in_channels=num_of_channels, out_channels= num_of_channels, kernel_size=3, stride=1, padding=1, bias=False) self.in2 = nn.InstanceNorm2d(num_of_channels, affine=True) def forward(self, x): orig = x output = self.relu(self.in1(self.conv1(x))) output = self.in2(self.conv2(output)) output = torch.add(output, orig) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_of_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_relu_repeat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r1 = rindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tl.where(xmask, tmp2, 0) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.sum(tmp7, 1)[:, None] tmp9 = tl.full([XBLOCK, 1], 16, tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 / tmp10 tmp12 = tmp2 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tmp1 - tmp11 tmp19 = 16.0 tmp20 = tmp17 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp24 = tmp18 * tmp23 tmp25 = tmp24 * tmp0 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask) tl.store(out_ptr4 + x0, tmp23, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_repeat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r1 = rindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tl.where(xmask, tmp2, 0) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.sum(tmp7, 1)[:, None] tmp9 = tl.full([XBLOCK, 1], 16, tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 / tmp10 tmp12 = tmp2 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tmp1 - tmp11 tmp19 = 16.0 tmp20 = tmp17 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp24 = tmp18 * tmp23 tmp25 = tmp24 * tmp0 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tl.store(out_ptr0 + x0, tmp0, xmask) 
tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask) tl.store(out_ptr4 + x0, tmp23, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((16,), (1,), torch.float32) buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) get_raw_stream(0) triton_per_fused__native_batch_norm_legit_relu_repeat_0[grid(16)]( primals_3, buf0, primals_4, buf1, buf2, buf6, buf5, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_3 del primals_4 buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = empty_strided_cuda((16,), (1,), torch.float32) buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_add_repeat_1[grid(16)]( primals_6, buf7, primals_7, primals_1, buf8, buf9, buf13, buf12, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_6 del primals_7 return (buf13, primals_1, primals_2, primals_5, buf0, buf1, reinterpret_tensor(buf5, (16,), (1,), 0), buf6, buf7, buf8, reinterpret_tensor(buf12, (16,), (1,), 0), reinterpret_tensor(buf9, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)) class ResBlockNew(nn.Module): def __init__(self, num_of_channels): super(ResBlockNew, self).__init__() self.conv1 = nn.Conv2d(in_channels=num_of_channels, out_channels= num_of_channels, kernel_size=3, stride=1, padding=1, bias=False) self.in1 = nn.InstanceNorm2d(num_of_channels, affine=True) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(in_channels=num_of_channels, out_channels= num_of_channels, kernel_size=3, stride=1, padding=1, bias=False) self.in2 = nn.InstanceNorm2d(num_of_channels, affine=True) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.in1.weight primals_4 = self.in1.bias primals_5 = self.conv2.weight primals_6 = self.in2.weight primals_7 = self.in2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
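# A minimal usage sketch for the ResBlockNew module defined above; an assumed
# example (CUDA device available, input shape taken from get_inputs()). The
# fused kernels should reproduce the eager ResBlock pipeline (conv ->
# InstanceNorm -> ReLU -> conv -> InstanceNorm -> residual add) up to
# floating-point rounding.
import torch

if __name__ == "__main__":
    res = ResBlockNew(num_of_channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    y = res(x)
    assert y.shape == x.shape  # shape is preserved by the residual connection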
dqawami/openvino_training_extensions
ResBlock
false
15,221
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
EntmaxBisect
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/td/ctdfyba37qrlylvsvpglykcmyrw6se7rfse25ksfthli6ayfctmp.py # Topologically Sorted Source Nodes: [max_1, sub, X, sub_1, max_val_1, pow_2, tau_hi, pow_1, tau_lo, dm, dm_1, tau_m, sub_10, clamp_1, truediv_1, p_m, sum_2, sub_6, clamp, truediv, pow_3, sum_1, f_lo, tau_lo_1, dm_2, tau_m_1, sub_13, clamp_2, truediv_2, p_m_1, sum_3, f_m_1, mul_3, tau_lo_2, dm_3, tau_m_2, sub_16, clamp_3, truediv_3, p_m_2, sum_4, f_m_2, mul_4, tau_lo_3, dm_4, tau_m_3, sub_19, clamp_4, truediv_4, p_m_3, sum_5, f_m_3, mul_5, tau_lo_4, dm_5, tau_m_4, sub_22, clamp_5, truediv_5, p_m_4, sum_6, f_m_4, mul_6, tau_lo_5, dm_6, tau_m_5, sub_25, clamp_6, truediv_6, p_m_5, sum_7, f_m_5, mul_7, tau_lo_6, dm_7, tau_m_6, sub_28, clamp_7, truediv_7, p_m_6, sum_8, f_m_6, mul_8, tau_lo_7, dm_8, tau_m_7, sub_31, clamp_8, truediv_8, p_m_7, sum_9, f_m_7, mul_9, tau_lo_8, dm_9, tau_m_8, sub_34, clamp_9, truediv_9, p_m_8, sum_10, f_m_8, mul_10, tau_lo_9, dm_10, tau_m_9, sub_37, clamp_10, truediv_10, p_m_9, sum_11, f_m_9, mul_11, tau_lo_10, dm_11, tau_m_10, sub_40, clamp_11, truediv_11, p_m_10, sum_12, f_m_10, mul_12, tau_lo_11, dm_12, tau_m_11, sub_43, clamp_12, truediv_12, p_m_11, sum_13, f_m_11, mul_13, tau_lo_12, dm_13, tau_m_12, sub_46, clamp_13, truediv_13, p_m_12, sum_14, f_m_12, mul_14, tau_lo_13, dm_14, tau_m_13, sub_49, clamp_14, truediv_14, p_m_13, sum_15, f_m_13, mul_15, tau_lo_14, dm_15, tau_m_14, sub_52, clamp_15, truediv_15, p_m_14, sum_16, f_m_14, mul_16, tau_lo_15, dm_16, tau_m_15, sub_55, clamp_16, truediv_16, p_m_15, sum_17, f_m_15, mul_17, tau_lo_16, dm_17, tau_m_16, sub_58, clamp_17, truediv_17, p_m_16, sum_18, f_m_16, mul_18, tau_lo_17, dm_18, tau_m_17, sub_61, clamp_18, truediv_18, p_m_17, sum_19, f_m_17, tau_lo_18, dm_19, tau_m_18, sub_64, clamp_19, truediv_19, p_m_18, sum_20, tau_lo_19, dm_20, tau_m_19, sub_67, clamp_20, truediv_20, p_m_19, sum_21, tau_lo_20, dm_21, tau_m_20, sub_70, clamp_21, truediv_21, p_m_20, sum_22, tau_lo_21, dm_22, tau_m_21, sub_73, clamp_22, truediv_22, p_m_21, sum_23, tau_lo_22, dm_23, tau_m_22, sub_76, clamp_23, truediv_23, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_79, clamp_24, truediv_24, p_m_23, sum_25, tau_lo_24, dm_25, tau_m_24, sub_82, clamp_25, truediv_25, p_m_24, sum_26, 
tau_lo_25, dm_26, tau_m_25, sub_85, clamp_26, truediv_26, p_m_25, sum_27, tau_lo_26, dm_27, tau_m_26, sub_88, clamp_27, truediv_27, p_m_26, sum_28, tau_lo_27, dm_28, tau_m_27, sub_91, clamp_28, truediv_28, p_m_27, sum_29], Original ATen: [aten.max, aten.sub, aten.mul, aten.pow, aten.div, aten.add, aten.clamp, aten.sum, aten.where] # Source node to ATen node mapping: # X => mul # clamp => clamp_min # clamp_1 => clamp_min_1 # clamp_10 => clamp_min_10 # clamp_11 => clamp_min_11 # clamp_12 => clamp_min_12 # clamp_13 => clamp_min_13 # clamp_14 => clamp_min_14 # clamp_15 => clamp_min_15 # clamp_16 => clamp_min_16 # clamp_17 => clamp_min_17 # clamp_18 => clamp_min_18 # clamp_19 => clamp_min_19 # clamp_2 => clamp_min_2 # clamp_20 => clamp_min_20 # clamp_21 => clamp_min_21 # clamp_22 => clamp_min_22 # clamp_23 => clamp_min_23 # clamp_24 => clamp_min_24 # clamp_25 => clamp_min_25 # clamp_26 => clamp_min_26 # clamp_27 => clamp_min_27 # clamp_28 => clamp_min_28 # clamp_3 => clamp_min_3 # clamp_4 => clamp_min_4 # clamp_5 => clamp_min_5 # clamp_6 => clamp_min_6 # clamp_7 => clamp_min_7 # clamp_8 => clamp_min_8 # clamp_9 => clamp_min_9 # dm => sub_9 # dm_1 => div # dm_10 => div_9 # dm_11 => div_10 # dm_12 => div_11 # dm_13 => div_12 # dm_14 => div_13 # dm_15 => div_14 # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_2 => div_1 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_3 => div_2 # dm_4 => div_3 # dm_5 => div_4 # dm_6 => div_5 # dm_7 => div_6 # dm_8 => div_7 # dm_9 => div_8 # f_lo => sub_8 # f_m_1 => sub_15 # f_m_10 => sub_42 # f_m_11 => sub_45 # f_m_12 => sub_48 # f_m_13 => sub_51 # f_m_14 => sub_54 # f_m_15 => sub_57 # f_m_16 => sub_60 # f_m_17 => sub_63 # f_m_2 => sub_18 # f_m_3 => sub_21 # f_m_4 => sub_24 # f_m_5 => sub_27 # f_m_6 => sub_30 # f_m_7 => sub_33 # f_m_8 => sub_36 # f_m_9 => sub_39 # max_1 => max_1 # max_val_1 => mul_1 # mul_10 => mul_20 # mul_11 => mul_22 # mul_12 => mul_24 # mul_13 => mul_26 # mul_14 => mul_28 # mul_15 => mul_30 # mul_16 => mul_32 # mul_17 => mul_34 # mul_18 => mul_36 # mul_3 => mul_6 # mul_4 => mul_8 # mul_5 => mul_10 # mul_6 => mul_12 # mul_7 => mul_14 # mul_8 => mul_16 # mul_9 => mul_18 # p_m => pow_4 # p_m_1 => pow_5 # p_m_10 => pow_14 # p_m_11 => pow_15 # p_m_12 => pow_16 # p_m_13 => pow_17 # p_m_14 => pow_18 # p_m_15 => pow_19 # p_m_16 => pow_20 # p_m_17 => pow_21 # p_m_18 => pow_22 # p_m_19 => pow_23 # p_m_2 => pow_6 # p_m_20 => pow_24 # p_m_21 => pow_25 # p_m_22 => pow_26 # p_m_23 => pow_27 # p_m_24 => pow_28 # p_m_25 => pow_29 # p_m_26 => pow_30 # p_m_27 => pow_31 # p_m_3 => pow_7 # p_m_4 => pow_8 # p_m_5 => pow_9 # p_m_6 => pow_10 # p_m_7 => pow_11 # p_m_8 => pow_12 # p_m_9 => pow_13 # pow_1 => full_default_2 # pow_2 => full_default_3 # pow_3 => pow_3 # sub => full_default # sub_1 => full_default_1 # sub_10 => sub_10 # sub_13 => sub_13 # sub_16 => sub_16 # sub_19 => sub_19 # sub_22 => sub_22 # sub_25 => sub_25 # sub_28 => sub_28 # sub_31 => sub_31 # sub_34 => sub_34 # sub_37 => sub_37 # sub_40 => sub_40 # sub_43 => sub_43 # sub_46 => sub_46 # sub_49 => sub_49 # sub_52 => sub_52 # sub_55 => sub_55 # sub_58 => sub_58 # sub_6 => sub_6 # sub_61 => sub_61 # sub_64 => sub_64 # sub_67 => sub_67 # sub_70 => sub_70 # sub_73 => sub_73 # sub_76 => sub_76 # sub_79 => sub_79 # sub_82 => sub_82 # sub_85 => sub_85 # sub_88 => sub_88 # sub_91 => sub_91 # sum_1 => sum_1 # sum_10 => sum_10 # sum_11 => sum_11 # sum_12 => sum_12 # 
sum_13 => sum_13 # sum_14 => sum_14 # sum_15 => sum_15 # sum_16 => sum_16 # sum_17 => sum_17 # sum_18 => sum_18 # sum_19 => sum_19 # sum_2 => sum_2 # sum_20 => sum_20 # sum_21 => sum_21 # sum_22 => sum_22 # sum_23 => sum_23 # sum_24 => sum_24 # sum_25 => sum_25 # sum_26 => sum_26 # sum_27 => sum_27 # sum_28 => sum_28 # sum_29 => sum_29 # sum_3 => sum_3 # sum_4 => sum_4 # sum_5 => sum_5 # sum_6 => sum_6 # sum_7 => sum_7 # sum_8 => sum_8 # sum_9 => sum_9 # tau_hi => sub_5 # tau_lo => sub_3 # tau_lo_1 => where # tau_lo_10 => where_9 # tau_lo_11 => where_10 # tau_lo_12 => where_11 # tau_lo_13 => where_12 # tau_lo_14 => where_13 # tau_lo_15 => where_14 # tau_lo_16 => where_15 # tau_lo_17 => where_16 # tau_lo_18 => where_17 # tau_lo_19 => where_18 # tau_lo_2 => where_1 # tau_lo_20 => where_19 # tau_lo_21 => where_20 # tau_lo_22 => where_21 # tau_lo_23 => where_22 # tau_lo_24 => where_23 # tau_lo_25 => where_24 # tau_lo_26 => where_25 # tau_lo_27 => where_26 # tau_lo_3 => where_2 # tau_lo_4 => where_3 # tau_lo_5 => where_4 # tau_lo_6 => where_5 # tau_lo_7 => where_6 # tau_lo_8 => where_7 # tau_lo_9 => where_8 # tau_m => add # tau_m_1 => add_1 # tau_m_10 => add_10 # tau_m_11 => add_11 # tau_m_12 => add_12 # tau_m_13 => add_13 # tau_m_14 => add_14 # tau_m_15 => add_15 # tau_m_16 => add_16 # tau_m_17 => add_17 # tau_m_18 => add_18 # tau_m_19 => add_19 # tau_m_2 => add_2 # tau_m_20 => add_20 # tau_m_21 => add_21 # tau_m_22 => add_22 # tau_m_23 => add_23 # tau_m_24 => add_24 # tau_m_25 => add_25 # tau_m_26 => add_26 # tau_m_27 => add_27 # tau_m_3 => add_3 # tau_m_4 => add_4 # tau_m_5 => add_5 # tau_m_6 => add_6 # tau_m_7 => add_7 # tau_m_8 => add_8 # tau_m_9 => add_9 # truediv => full_default_4 # truediv_1 => full_default_5 # truediv_10 => full_default_14 # truediv_11 => full_default_15 # truediv_12 => full_default_16 # truediv_13 => full_default_17 # truediv_14 => full_default_18 # truediv_15 => full_default_19 # truediv_16 => full_default_20 # truediv_17 => full_default_21 # truediv_18 => full_default_22 # truediv_19 => full_default_23 # truediv_2 => full_default_6 # truediv_20 => full_default_24 # truediv_21 => full_default_25 # truediv_22 => full_default_26 # truediv_23 => full_default_27 # truediv_24 => full_default_28 # truediv_25 => full_default_29 # truediv_26 => full_default_30 # truediv_27 => full_default_31 # truediv_28 => full_default_32 # truediv_3 => full_default_7 # truediv_4 => full_default_8 # truediv_5 => full_default_9 # truediv_6 => full_default_10 # truediv_7 => full_default_11 # truediv_8 => full_default_12 # truediv_9 => full_default_13 # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %full_default_1), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: 
cuda:0, pin_memory: False}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %full_default_3), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub_3 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %full_default_2), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_5, %sub_3), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_9, 2), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_3, %div), kwargs = {}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_10, 0), kwargs = {}) # %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_1, %full_default_5), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_4, [-1]), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_3), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_6, 0), kwargs = {}) # %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min, %full_default_4), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1]), kwargs = {}) # %sub_8 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze, %add, %sub_3), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 2), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %div_1), kwargs = {}) # %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_1), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_13, 0), kwargs = {}) # %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_2, %full_default_6), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [-1]), kwargs = {}) # %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, 1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_15, %sub_8), kwargs = {}) # %where_1 : 
[num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_1, %add_1, %where), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_1, 2), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %div_2), kwargs = {}) # %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_2), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_16, 0), kwargs = {}) # %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_3, %full_default_7), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_6, [-1]), kwargs = {}) # %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_4, 1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %sub_8), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_2, %add_2, %where_1), kwargs = {}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, 2), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_2, %div_3), kwargs = {}) # %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_3), kwargs = {}) # %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_19, 0), kwargs = {}) # %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_4, %full_default_8), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [-1]), kwargs = {}) # %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_5, 1), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_21, %sub_8), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_3, %add_3, %where_2), kwargs = {}) # %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_3, 2), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_3, %div_4), kwargs = {}) # %sub_22 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_4), kwargs = {}) # %clamp_min_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_22, 0), kwargs = {}) # %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_5, %full_default_9), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_8, [-1]), 
kwargs = {}) # %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_6, 1), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %sub_8), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_4, %add_4, %where_3), kwargs = {}) # %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_4, 2), kwargs = {}) # %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_4, %div_5), kwargs = {}) # %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_5), kwargs = {}) # %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_25, 0), kwargs = {}) # %full_default_10 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_9 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_6, %full_default_10), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_9, [-1]), kwargs = {}) # %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_7, 1), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %sub_8), kwargs = {}) # %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_5, %add_5, %where_4), kwargs = {}) # %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_5, 2), kwargs = {}) # %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_5, %div_6), kwargs = {}) # %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_6), kwargs = {}) # %clamp_min_7 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_28, 0), kwargs = {}) # %full_default_11 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_10 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_7, %full_default_11), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_10, [-1]), kwargs = {}) # %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_8, 1), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_30, %sub_8), kwargs = {}) # %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_6, %add_6, %where_5), kwargs = {}) # %div_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_6, 2), kwargs = {}) # %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_6, %div_7), kwargs = {}) # %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_7), kwargs = {}) # %clamp_min_8 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_31, 0), kwargs = {}) # %full_default_12 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, 
pin_memory: False}) # %pow_11 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_8, %full_default_12), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_11, [-1]), kwargs = {}) # %sub_33 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_9, 1), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_33, %sub_8), kwargs = {}) # %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_7, %add_7, %where_6), kwargs = {}) # %div_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_7, 2), kwargs = {}) # %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_7, %div_8), kwargs = {}) # %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_8), kwargs = {}) # %clamp_min_9 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_34, 0), kwargs = {}) # %full_default_13 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_12 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_9, %full_default_13), kwargs = {}) # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_12, [-1]), kwargs = {}) # %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_10, 1), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_36, %sub_8), kwargs = {}) # %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_8, %add_8, %where_7), kwargs = {}) # %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_8, 2), kwargs = {}) # %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_8, %div_9), kwargs = {}) # %sub_37 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_9), kwargs = {}) # %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_37, 0), kwargs = {}) # %full_default_14 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_13 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_10, %full_default_14), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_13, [-1]), kwargs = {}) # %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_11, 1), kwargs = {}) # %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_39, %sub_8), kwargs = {}) # %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_9, %add_9, %where_8), kwargs = {}) # %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_9, 2), kwargs = {}) # %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_9, %div_10), kwargs = {}) # %sub_40 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_10), kwargs = {}) # %clamp_min_11 : [num_users=1] = 
call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_40, 0), kwargs = {}) # %full_default_15 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_14 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_11, %full_default_15), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_14, [-1]), kwargs = {}) # %sub_42 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_12, 1), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_42, %sub_8), kwargs = {}) # %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_10, %add_10, %where_9), kwargs = {}) # %div_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_10, 2), kwargs = {}) # %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_10, %div_11), kwargs = {}) # %sub_43 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_11), kwargs = {}) # %clamp_min_12 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_43, 0), kwargs = {}) # %full_default_16 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_15 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_12, %full_default_16), kwargs = {}) # %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_15, [-1]), kwargs = {}) # %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_13, 1), kwargs = {}) # %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_45, %sub_8), kwargs = {}) # %where_11 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_11, %add_11, %where_10), kwargs = {}) # %div_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_11, 2), kwargs = {}) # %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_11, %div_12), kwargs = {}) # %sub_46 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_12), kwargs = {}) # %clamp_min_13 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_46, 0), kwargs = {}) # %full_default_17 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_16 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_13, %full_default_17), kwargs = {}) # %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_16, [-1]), kwargs = {}) # %sub_48 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_14, 1), kwargs = {}) # %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_48, %sub_8), kwargs = {}) # %where_12 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_12, %add_12, %where_11), kwargs = {}) # %div_13 : [num_users=2] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%div_12, 2), kwargs = {}) # %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_12, %div_13), kwargs = {}) # %sub_49 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_13), kwargs = {}) # %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_49, 0), kwargs = {}) # %full_default_18 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_17 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_14, %full_default_18), kwargs = {}) # %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_17, [-1]), kwargs = {}) # %sub_51 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_15, 1), kwargs = {}) # %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_51, %sub_8), kwargs = {}) # %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_13, %add_13, %where_12), kwargs = {}) # %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_13, 2), kwargs = {}) # %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_13, %div_14), kwargs = {}) # %sub_52 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_14), kwargs = {}) # %clamp_min_15 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_52, 0), kwargs = {}) # %full_default_19 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_18 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_15, %full_default_19), kwargs = {}) # %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_18, [-1]), kwargs = {}) # %sub_54 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_16, 1), kwargs = {}) # %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_54, %sub_8), kwargs = {}) # %where_14 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_14, %add_14, %where_13), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %add_15 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_14, %div_15), kwargs = {}) # %sub_55 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_15), kwargs = {}) # %clamp_min_16 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_55, 0), kwargs = {}) # %full_default_20 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_19 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_16, %full_default_20), kwargs = {}) # %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_19, [-1]), kwargs = {}) # %sub_57 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = 
(%sum_17, 1), kwargs = {}) # %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_57, %sub_8), kwargs = {}) # %where_15 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_15, %add_15, %where_14), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %add_16 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_15, %div_16), kwargs = {}) # %sub_58 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_16), kwargs = {}) # %clamp_min_17 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_58, 0), kwargs = {}) # %full_default_21 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_20 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_17, %full_default_21), kwargs = {}) # %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_20, [-1]), kwargs = {}) # %sub_60 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_18, 1), kwargs = {}) # %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_60, %sub_8), kwargs = {}) # %where_16 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_16, %add_16, %where_15), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_16, %div_17), kwargs = {}) # %sub_61 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_17), kwargs = {}) # %clamp_min_18 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_61, 0), kwargs = {}) # %full_default_22 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_21 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_18, %full_default_22), kwargs = {}) # %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_21, [-1]), kwargs = {}) # %sub_63 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_19, 1), kwargs = {}) # %where_17 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_17, %add_17, %where_16), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %add_18 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_17, %div_18), kwargs = {}) # %sub_64 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_18), kwargs = {}) # %clamp_min_19 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_64, 0), kwargs = {}) # %full_default_23 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_22 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_19, %full_default_23), kwargs = {}) # %sum_20 : 
[num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_22, [-1]), kwargs = {}) # %where_18 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_18, %add_18, %where_17), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %add_19 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_18, %div_19), kwargs = {}) # %sub_67 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_19), kwargs = {}) # %clamp_min_20 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_67, 0), kwargs = {}) # %full_default_24 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_23 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_20, %full_default_24), kwargs = {}) # %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_23, [-1]), kwargs = {}) # %where_19 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_19, %add_19, %where_18), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_19, %div_20), kwargs = {}) # %sub_70 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_20), kwargs = {}) # %clamp_min_21 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_70, 0), kwargs = {}) # %full_default_25 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_24 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_21, %full_default_25), kwargs = {}) # %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_24, [-1]), kwargs = {}) # %where_20 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_20, %add_20, %where_19), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %add_21 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_20, %div_21), kwargs = {}) # %sub_73 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_21), kwargs = {}) # %clamp_min_22 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_73, 0), kwargs = {}) # %full_default_26 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_25 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_22, %full_default_26), kwargs = {}) # %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_25, [-1]), kwargs = {}) # %where_21 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_21, %add_21, %where_20), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %add_22 : 
[num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_21, %div_22), kwargs = {}) # %sub_76 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_22), kwargs = {}) # %clamp_min_23 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_76, 0), kwargs = {}) # %full_default_27 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_26 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_23, %full_default_27), kwargs = {}) # %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_26, [-1]), kwargs = {}) # %where_22 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_22, %add_22, %where_21), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %add_23 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_22, %div_23), kwargs = {}) # %sub_79 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_23), kwargs = {}) # %clamp_min_24 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_79, 0), kwargs = {}) # %full_default_28 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_27 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_24, %full_default_28), kwargs = {}) # %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_27, [-1]), kwargs = {}) # %where_23 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_23, %add_23, %where_22), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %add_24 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_23, %div_24), kwargs = {}) # %sub_82 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_24), kwargs = {}) # %clamp_min_25 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_82, 0), kwargs = {}) # %full_default_29 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_28 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_25, %full_default_29), kwargs = {}) # %sum_26 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_28, [-1]), kwargs = {}) # %where_24 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_24, %add_24, %where_23), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_24, %div_25), kwargs = {}) # %sub_85 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_25), kwargs = {}) # %clamp_min_26 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_85, 0), kwargs = {}) # %full_default_30 : 
[num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_29 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_26, %full_default_30), kwargs = {}) # %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_29, [-1]), kwargs = {}) # %where_25 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_25, %add_25, %where_24), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %add_26 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_25, %div_26), kwargs = {}) # %sub_88 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_26), kwargs = {}) # %clamp_min_27 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_88, 0), kwargs = {}) # %full_default_31 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_30 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_27, %full_default_31), kwargs = {}) # %sum_28 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_30, [-1]), kwargs = {}) # %where_26 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_26, %add_26, %where_25), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {}) # %sub_91 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_27), kwargs = {}) # %clamp_min_28 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_91, 0), kwargs = {}) # %full_default_32 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_31 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_28, %full_default_32), kwargs = {}) # %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_31, [-1]), kwargs = {}) triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0', 'mutated_arg_names': ['in_out_ptr13'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0(in_out_ptr13, in_ptr0, out_ptr0, out_ptr25, out_ptr31, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = triton_helpers.maximum(tmp0, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8 * tmp1 tmp10 = 1.0 tmp11 = tmp9 - tmp10 tmp12 = tmp9 - tmp1 tmp13 = tmp12 - tmp11 tmp14 = tmp13 * tmp1 tmp15 = tmp11 + tmp14 tmp16 = tmp2 - tmp15 tmp17 = 0.0 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = 2.0 tmp20 = libdevice.pow(tmp18, tmp19) tmp21 = tmp3 * tmp1 tmp22 = tmp21 - tmp15 tmp23 = triton_helpers.maximum(tmp22, tmp17) tmp24 = libdevice.pow(tmp23, tmp19) tmp25 = tmp20 + tmp24 tmp26 = tmp5 * tmp1 tmp27 = tmp26 - tmp15 tmp28 = triton_helpers.maximum(tmp27, tmp17) tmp29 = libdevice.pow(tmp28, tmp19) tmp30 = tmp25 + tmp29 tmp31 = tmp7 * tmp1 tmp32 = tmp31 - tmp15 tmp33 = triton_helpers.maximum(tmp32, tmp17) tmp34 = libdevice.pow(tmp33, tmp19) tmp35 = tmp30 + tmp34 tmp36 = tmp2 - tmp11 tmp37 = triton_helpers.maximum(tmp36, tmp17) tmp38 = libdevice.pow(tmp37, tmp19) tmp39 = tmp21 - tmp11 tmp40 = triton_helpers.maximum(tmp39, tmp17) tmp41 = libdevice.pow(tmp40, tmp19) tmp42 = tmp38 + tmp41 tmp43 = tmp26 - tmp11 tmp44 = triton_helpers.maximum(tmp43, tmp17) tmp45 = libdevice.pow(tmp44, tmp19) tmp46 = tmp42 + tmp45 tmp47 = tmp31 - tmp11 tmp48 = triton_helpers.maximum(tmp47, tmp17) tmp49 = libdevice.pow(tmp48, tmp19) tmp50 = tmp46 + tmp49 tmp51 = tmp35 - tmp10 tmp52 = tmp50 - tmp10 tmp53 = tmp51 * tmp52 tmp54 = tmp53 >= tmp17 tmp55 = tl.where(tmp54, tmp15, tmp11) tmp56 = tmp14 * tmp1 tmp57 = tmp55 + tmp56 tmp58 = tmp2 - tmp57 tmp59 = triton_helpers.maximum(tmp58, tmp17) tmp60 = libdevice.pow(tmp59, tmp19) tmp61 = tmp21 - tmp57 tmp62 = triton_helpers.maximum(tmp61, tmp17) tmp63 = libdevice.pow(tmp62, tmp19) tmp64 = tmp60 + tmp63 tmp65 = tmp26 - tmp57 tmp66 = triton_helpers.maximum(tmp65, tmp17) tmp67 = libdevice.pow(tmp66, tmp19) tmp68 = tmp64 + tmp67 tmp69 = tmp31 - tmp57 tmp70 = triton_helpers.maximum(tmp69, tmp17) tmp71 = libdevice.pow(tmp70, tmp19) tmp72 = tmp68 + tmp71 tmp73 = tmp72 - tmp10 tmp74 = tmp73 * tmp52 tmp75 = tmp74 >= tmp17 tmp76 = tl.where(tmp75, tmp57, tmp55) tmp77 = tmp56 * tmp1 tmp78 = tmp76 + tmp77 tmp79 = tmp2 - tmp78 tmp80 = triton_helpers.maximum(tmp79, tmp17) tmp81 = libdevice.pow(tmp80, tmp19) tmp82 = tmp21 - tmp78 tmp83 = triton_helpers.maximum(tmp82, tmp17) tmp84 = libdevice.pow(tmp83, tmp19) tmp85 = 
tmp81 + tmp84 tmp86 = tmp26 - tmp78 tmp87 = triton_helpers.maximum(tmp86, tmp17) tmp88 = libdevice.pow(tmp87, tmp19) tmp89 = tmp85 + tmp88 tmp90 = tmp31 - tmp78 tmp91 = triton_helpers.maximum(tmp90, tmp17) tmp92 = libdevice.pow(tmp91, tmp19) tmp93 = tmp89 + tmp92 tmp94 = tmp93 - tmp10 tmp95 = tmp94 * tmp52 tmp96 = tmp95 >= tmp17 tmp97 = tl.where(tmp96, tmp78, tmp76) tmp98 = tmp77 * tmp1 tmp99 = tmp97 + tmp98 tmp100 = tmp2 - tmp99 tmp101 = triton_helpers.maximum(tmp100, tmp17) tmp102 = libdevice.pow(tmp101, tmp19) tmp103 = tmp21 - tmp99 tmp104 = triton_helpers.maximum(tmp103, tmp17) tmp105 = libdevice.pow(tmp104, tmp19) tmp106 = tmp102 + tmp105 tmp107 = tmp26 - tmp99 tmp108 = triton_helpers.maximum(tmp107, tmp17) tmp109 = libdevice.pow(tmp108, tmp19) tmp110 = tmp106 + tmp109 tmp111 = tmp31 - tmp99 tmp112 = triton_helpers.maximum(tmp111, tmp17) tmp113 = libdevice.pow(tmp112, tmp19) tmp114 = tmp110 + tmp113 tmp115 = tmp114 - tmp10 tmp116 = tmp115 * tmp52 tmp117 = tmp116 >= tmp17 tmp118 = tl.where(tmp117, tmp99, tmp97) tmp119 = tmp98 * tmp1 tmp120 = tmp118 + tmp119 tmp121 = tmp2 - tmp120 tmp122 = triton_helpers.maximum(tmp121, tmp17) tmp123 = libdevice.pow(tmp122, tmp19) tmp124 = tmp21 - tmp120 tmp125 = triton_helpers.maximum(tmp124, tmp17) tmp126 = libdevice.pow(tmp125, tmp19) tmp127 = tmp123 + tmp126 tmp128 = tmp26 - tmp120 tmp129 = triton_helpers.maximum(tmp128, tmp17) tmp130 = libdevice.pow(tmp129, tmp19) tmp131 = tmp127 + tmp130 tmp132 = tmp31 - tmp120 tmp133 = triton_helpers.maximum(tmp132, tmp17) tmp134 = libdevice.pow(tmp133, tmp19) tmp135 = tmp131 + tmp134 tmp136 = tmp135 - tmp10 tmp137 = tmp136 * tmp52 tmp138 = tmp137 >= tmp17 tmp139 = tl.where(tmp138, tmp120, tmp118) tmp140 = tmp119 * tmp1 tmp141 = tmp139 + tmp140 tmp142 = tmp2 - tmp141 tmp143 = triton_helpers.maximum(tmp142, tmp17) tmp144 = libdevice.pow(tmp143, tmp19) tmp145 = tmp21 - tmp141 tmp146 = triton_helpers.maximum(tmp145, tmp17) tmp147 = libdevice.pow(tmp146, tmp19) tmp148 = tmp144 + tmp147 tmp149 = tmp26 - tmp141 tmp150 = triton_helpers.maximum(tmp149, tmp17) tmp151 = libdevice.pow(tmp150, tmp19) tmp152 = tmp148 + tmp151 tmp153 = tmp31 - tmp141 tmp154 = triton_helpers.maximum(tmp153, tmp17) tmp155 = libdevice.pow(tmp154, tmp19) tmp156 = tmp152 + tmp155 tmp157 = tmp156 - tmp10 tmp158 = tmp157 * tmp52 tmp159 = tmp158 >= tmp17 tmp160 = tl.where(tmp159, tmp141, tmp139) tmp161 = tmp140 * tmp1 tmp162 = tmp160 + tmp161 tmp163 = tmp2 - tmp162 tmp164 = triton_helpers.maximum(tmp163, tmp17) tmp165 = libdevice.pow(tmp164, tmp19) tmp166 = tmp21 - tmp162 tmp167 = triton_helpers.maximum(tmp166, tmp17) tmp168 = libdevice.pow(tmp167, tmp19) tmp169 = tmp165 + tmp168 tmp170 = tmp26 - tmp162 tmp171 = triton_helpers.maximum(tmp170, tmp17) tmp172 = libdevice.pow(tmp171, tmp19) tmp173 = tmp169 + tmp172 tmp174 = tmp31 - tmp162 tmp175 = triton_helpers.maximum(tmp174, tmp17) tmp176 = libdevice.pow(tmp175, tmp19) tmp177 = tmp173 + tmp176 tmp178 = tmp177 - tmp10 tmp179 = tmp178 * tmp52 tmp180 = tmp179 >= tmp17 tmp181 = tl.where(tmp180, tmp162, tmp160) tmp182 = tmp161 * tmp1 tmp183 = tmp181 + tmp182 tmp184 = tmp2 - tmp183 tmp185 = triton_helpers.maximum(tmp184, tmp17) tmp186 = libdevice.pow(tmp185, tmp19) tmp187 = tmp21 - tmp183 tmp188 = triton_helpers.maximum(tmp187, tmp17) tmp189 = libdevice.pow(tmp188, tmp19) tmp190 = tmp186 + tmp189 tmp191 = tmp26 - tmp183 tmp192 = triton_helpers.maximum(tmp191, tmp17) tmp193 = libdevice.pow(tmp192, tmp19) tmp194 = tmp190 + tmp193 tmp195 = tmp31 - tmp183 tmp196 = triton_helpers.maximum(tmp195, tmp17) tmp197 = 
libdevice.pow(tmp196, tmp19) tmp198 = tmp194 + tmp197 tmp199 = tmp198 - tmp10 tmp200 = tmp199 * tmp52 tmp201 = tmp200 >= tmp17 tmp202 = tl.where(tmp201, tmp183, tmp181) tmp203 = tmp182 * tmp1 tmp204 = tmp202 + tmp203 tmp205 = tmp2 - tmp204 tmp206 = triton_helpers.maximum(tmp205, tmp17) tmp207 = libdevice.pow(tmp206, tmp19) tmp208 = tmp21 - tmp204 tmp209 = triton_helpers.maximum(tmp208, tmp17) tmp210 = libdevice.pow(tmp209, tmp19) tmp211 = tmp207 + tmp210 tmp212 = tmp26 - tmp204 tmp213 = triton_helpers.maximum(tmp212, tmp17) tmp214 = libdevice.pow(tmp213, tmp19) tmp215 = tmp211 + tmp214 tmp216 = tmp31 - tmp204 tmp217 = triton_helpers.maximum(tmp216, tmp17) tmp218 = libdevice.pow(tmp217, tmp19) tmp219 = tmp215 + tmp218 tmp220 = tmp219 - tmp10 tmp221 = tmp220 * tmp52 tmp222 = tmp221 >= tmp17 tmp223 = tl.where(tmp222, tmp204, tmp202) tmp224 = tmp203 * tmp1 tmp225 = tmp223 + tmp224 tmp226 = tmp2 - tmp225 tmp227 = triton_helpers.maximum(tmp226, tmp17) tmp228 = libdevice.pow(tmp227, tmp19) tmp229 = tmp21 - tmp225 tmp230 = triton_helpers.maximum(tmp229, tmp17) tmp231 = libdevice.pow(tmp230, tmp19) tmp232 = tmp228 + tmp231 tmp233 = tmp26 - tmp225 tmp234 = triton_helpers.maximum(tmp233, tmp17) tmp235 = libdevice.pow(tmp234, tmp19) tmp236 = tmp232 + tmp235 tmp237 = tmp31 - tmp225 tmp238 = triton_helpers.maximum(tmp237, tmp17) tmp239 = libdevice.pow(tmp238, tmp19) tmp240 = tmp236 + tmp239 tmp241 = tmp240 - tmp10 tmp242 = tmp241 * tmp52 tmp243 = tmp242 >= tmp17 tmp244 = tl.where(tmp243, tmp225, tmp223) tmp245 = tmp224 * tmp1 tmp246 = tmp244 + tmp245 tmp247 = tmp2 - tmp246 tmp248 = triton_helpers.maximum(tmp247, tmp17) tmp249 = libdevice.pow(tmp248, tmp19) tmp250 = tmp21 - tmp246 tmp251 = triton_helpers.maximum(tmp250, tmp17) tmp252 = libdevice.pow(tmp251, tmp19) tmp253 = tmp249 + tmp252 tmp254 = tmp26 - tmp246 tmp255 = triton_helpers.maximum(tmp254, tmp17) tmp256 = libdevice.pow(tmp255, tmp19) tmp257 = tmp253 + tmp256 tmp258 = tmp31 - tmp246 tmp259 = triton_helpers.maximum(tmp258, tmp17) tmp260 = libdevice.pow(tmp259, tmp19) tmp261 = tmp257 + tmp260 tmp262 = tmp261 - tmp10 tmp263 = tmp262 * tmp52 tmp264 = tmp263 >= tmp17 tmp265 = tl.where(tmp264, tmp246, tmp244) tmp266 = tmp245 * tmp1 tmp267 = tmp265 + tmp266 tmp268 = tmp2 - tmp267 tmp269 = triton_helpers.maximum(tmp268, tmp17) tmp270 = libdevice.pow(tmp269, tmp19) tmp271 = tmp21 - tmp267 tmp272 = triton_helpers.maximum(tmp271, tmp17) tmp273 = libdevice.pow(tmp272, tmp19) tmp274 = tmp270 + tmp273 tmp275 = tmp26 - tmp267 tmp276 = triton_helpers.maximum(tmp275, tmp17) tmp277 = libdevice.pow(tmp276, tmp19) tmp278 = tmp274 + tmp277 tmp279 = tmp31 - tmp267 tmp280 = triton_helpers.maximum(tmp279, tmp17) tmp281 = libdevice.pow(tmp280, tmp19) tmp282 = tmp278 + tmp281 tmp283 = tmp282 - tmp10 tmp284 = tmp283 * tmp52 tmp285 = tmp284 >= tmp17 tmp286 = tl.where(tmp285, tmp267, tmp265) tmp287 = tmp266 * tmp1 tmp288 = tmp286 + tmp287 tmp289 = tmp2 - tmp288 tmp290 = triton_helpers.maximum(tmp289, tmp17) tmp291 = libdevice.pow(tmp290, tmp19) tmp292 = tmp21 - tmp288 tmp293 = triton_helpers.maximum(tmp292, tmp17) tmp294 = libdevice.pow(tmp293, tmp19) tmp295 = tmp291 + tmp294 tmp296 = tmp26 - tmp288 tmp297 = triton_helpers.maximum(tmp296, tmp17) tmp298 = libdevice.pow(tmp297, tmp19) tmp299 = tmp295 + tmp298 tmp300 = tmp31 - tmp288 tmp301 = triton_helpers.maximum(tmp300, tmp17) tmp302 = libdevice.pow(tmp301, tmp19) tmp303 = tmp299 + tmp302 tmp304 = tmp303 - tmp10 tmp305 = tmp304 * tmp52 tmp306 = tmp305 >= tmp17 tmp307 = tl.where(tmp306, tmp288, tmp286) tmp308 = tmp287 * 
tmp1 tmp309 = tmp307 + tmp308 tmp310 = tmp2 - tmp309 tmp311 = triton_helpers.maximum(tmp310, tmp17) tmp312 = libdevice.pow(tmp311, tmp19) tmp313 = tmp21 - tmp309 tmp314 = triton_helpers.maximum(tmp313, tmp17) tmp315 = libdevice.pow(tmp314, tmp19) tmp316 = tmp312 + tmp315 tmp317 = tmp26 - tmp309 tmp318 = triton_helpers.maximum(tmp317, tmp17) tmp319 = libdevice.pow(tmp318, tmp19) tmp320 = tmp316 + tmp319 tmp321 = tmp31 - tmp309 tmp322 = triton_helpers.maximum(tmp321, tmp17) tmp323 = libdevice.pow(tmp322, tmp19) tmp324 = tmp320 + tmp323 tmp325 = tmp324 - tmp10 tmp326 = tmp325 * tmp52 tmp327 = tmp326 >= tmp17 tmp328 = tl.where(tmp327, tmp309, tmp307) tmp329 = tmp308 * tmp1 tmp330 = tmp328 + tmp329 tmp331 = tmp2 - tmp330 tmp332 = triton_helpers.maximum(tmp331, tmp17) tmp333 = libdevice.pow(tmp332, tmp19) tmp334 = tmp21 - tmp330 tmp335 = triton_helpers.maximum(tmp334, tmp17) tmp336 = libdevice.pow(tmp335, tmp19) tmp337 = tmp333 + tmp336 tmp338 = tmp26 - tmp330 tmp339 = triton_helpers.maximum(tmp338, tmp17) tmp340 = libdevice.pow(tmp339, tmp19) tmp341 = tmp337 + tmp340 tmp342 = tmp31 - tmp330 tmp343 = triton_helpers.maximum(tmp342, tmp17) tmp344 = libdevice.pow(tmp343, tmp19) tmp345 = tmp341 + tmp344 tmp346 = tmp345 - tmp10 tmp347 = tmp346 * tmp52 tmp348 = tmp347 >= tmp17 tmp349 = tl.where(tmp348, tmp330, tmp328) tmp350 = tmp329 * tmp1 tmp351 = tmp349 + tmp350 tmp352 = tmp2 - tmp351 tmp353 = triton_helpers.maximum(tmp352, tmp17) tmp354 = libdevice.pow(tmp353, tmp19) tmp355 = tmp21 - tmp351 tmp356 = triton_helpers.maximum(tmp355, tmp17) tmp357 = libdevice.pow(tmp356, tmp19) tmp358 = tmp354 + tmp357 tmp359 = tmp26 - tmp351 tmp360 = triton_helpers.maximum(tmp359, tmp17) tmp361 = libdevice.pow(tmp360, tmp19) tmp362 = tmp358 + tmp361 tmp363 = tmp31 - tmp351 tmp364 = triton_helpers.maximum(tmp363, tmp17) tmp365 = libdevice.pow(tmp364, tmp19) tmp366 = tmp362 + tmp365 tmp367 = tmp366 - tmp10 tmp368 = tmp367 * tmp52 tmp369 = tmp368 >= tmp17 tmp370 = tl.where(tmp369, tmp351, tmp349) tmp371 = tmp350 * tmp1 tmp372 = tmp370 + tmp371 tmp373 = tmp2 - tmp372 tmp374 = triton_helpers.maximum(tmp373, tmp17) tmp375 = libdevice.pow(tmp374, tmp19) tmp376 = tmp21 - tmp372 tmp377 = triton_helpers.maximum(tmp376, tmp17) tmp378 = libdevice.pow(tmp377, tmp19) tmp379 = tmp375 + tmp378 tmp380 = tmp26 - tmp372 tmp381 = triton_helpers.maximum(tmp380, tmp17) tmp382 = libdevice.pow(tmp381, tmp19) tmp383 = tmp379 + tmp382 tmp384 = tmp31 - tmp372 tmp385 = triton_helpers.maximum(tmp384, tmp17) tmp386 = libdevice.pow(tmp385, tmp19) tmp387 = tmp383 + tmp386 tmp388 = tmp387 - tmp10 tmp389 = tmp388 * tmp52 tmp390 = tmp389 >= tmp17 tmp391 = tl.where(tmp390, tmp372, tmp370) tmp392 = tmp371 * tmp1 tmp393 = tmp391 + tmp392 tmp394 = tmp2 - tmp393 tmp395 = triton_helpers.maximum(tmp394, tmp17) tmp396 = libdevice.pow(tmp395, tmp19) tmp397 = tmp21 - tmp393 tmp398 = triton_helpers.maximum(tmp397, tmp17) tmp399 = libdevice.pow(tmp398, tmp19) tmp400 = tmp396 + tmp399 tmp401 = tmp26 - tmp393 tmp402 = triton_helpers.maximum(tmp401, tmp17) tmp403 = libdevice.pow(tmp402, tmp19) tmp404 = tmp400 + tmp403 tmp405 = tmp31 - tmp393 tmp406 = triton_helpers.maximum(tmp405, tmp17) tmp407 = libdevice.pow(tmp406, tmp19) tmp408 = tmp404 + tmp407 tmp409 = tmp408 - tmp10 tmp410 = tmp409 * tmp52 tmp411 = tmp410 >= tmp17 tmp412 = tl.where(tmp411, tmp393, tmp391) tmp413 = tmp392 * tmp1 tmp414 = tmp412 + tmp413 tmp415 = tmp2 - tmp414 tmp416 = triton_helpers.maximum(tmp415, tmp17) tmp417 = libdevice.pow(tmp416, tmp19) tmp418 = tmp21 - tmp414 tmp419 = 
triton_helpers.maximum(tmp418, tmp17) tmp420 = libdevice.pow(tmp419, tmp19) tmp421 = tmp417 + tmp420 tmp422 = tmp26 - tmp414 tmp423 = triton_helpers.maximum(tmp422, tmp17) tmp424 = libdevice.pow(tmp423, tmp19) tmp425 = tmp421 + tmp424 tmp426 = tmp31 - tmp414 tmp427 = triton_helpers.maximum(tmp426, tmp17) tmp428 = libdevice.pow(tmp427, tmp19) tmp429 = tmp425 + tmp428 tmp430 = tmp429 - tmp10 tmp431 = tmp430 * tmp52 tmp432 = tmp431 >= tmp17 tmp433 = tl.where(tmp432, tmp414, tmp412) tmp434 = tmp413 * tmp1 tmp435 = tmp433 + tmp434 tmp436 = tmp2 - tmp435 tmp437 = triton_helpers.maximum(tmp436, tmp17) tmp438 = libdevice.pow(tmp437, tmp19) tmp439 = tmp21 - tmp435 tmp440 = triton_helpers.maximum(tmp439, tmp17) tmp441 = libdevice.pow(tmp440, tmp19) tmp442 = tmp438 + tmp441 tmp443 = tmp26 - tmp435 tmp444 = triton_helpers.maximum(tmp443, tmp17) tmp445 = libdevice.pow(tmp444, tmp19) tmp446 = tmp442 + tmp445 tmp447 = tmp31 - tmp435 tmp448 = triton_helpers.maximum(tmp447, tmp17) tmp449 = libdevice.pow(tmp448, tmp19) tmp450 = tmp446 + tmp449 tmp451 = tmp450 - tmp10 tmp452 = tmp451 * tmp52 tmp453 = tmp452 >= tmp17 tmp454 = tl.where(tmp453, tmp435, tmp433) tmp455 = tmp434 * tmp1 tmp456 = tmp454 + tmp455 tmp457 = tmp2 - tmp456 tmp458 = triton_helpers.maximum(tmp457, tmp17) tmp459 = libdevice.pow(tmp458, tmp19) tmp460 = tmp21 - tmp456 tmp461 = triton_helpers.maximum(tmp460, tmp17) tmp462 = libdevice.pow(tmp461, tmp19) tmp463 = tmp459 + tmp462 tmp464 = tmp26 - tmp456 tmp465 = triton_helpers.maximum(tmp464, tmp17) tmp466 = libdevice.pow(tmp465, tmp19) tmp467 = tmp463 + tmp466 tmp468 = tmp31 - tmp456 tmp469 = triton_helpers.maximum(tmp468, tmp17) tmp470 = libdevice.pow(tmp469, tmp19) tmp471 = tmp467 + tmp470 tmp472 = tmp471 - tmp10 tmp473 = tmp472 * tmp52 tmp474 = tmp473 >= tmp17 tmp475 = tl.where(tmp474, tmp456, tmp454) tmp476 = tmp455 * tmp1 tmp477 = tmp475 + tmp476 tmp478 = tmp2 - tmp477 tmp479 = triton_helpers.maximum(tmp478, tmp17) tmp480 = libdevice.pow(tmp479, tmp19) tmp481 = tmp21 - tmp477 tmp482 = triton_helpers.maximum(tmp481, tmp17) tmp483 = libdevice.pow(tmp482, tmp19) tmp484 = tmp480 + tmp483 tmp485 = tmp26 - tmp477 tmp486 = triton_helpers.maximum(tmp485, tmp17) tmp487 = libdevice.pow(tmp486, tmp19) tmp488 = tmp484 + tmp487 tmp489 = tmp31 - tmp477 tmp490 = triton_helpers.maximum(tmp489, tmp17) tmp491 = libdevice.pow(tmp490, tmp19) tmp492 = tmp488 + tmp491 tmp493 = tmp492 - tmp10 tmp494 = tmp493 * tmp52 tmp495 = tmp494 >= tmp17 tmp496 = tl.where(tmp495, tmp477, tmp475) tmp497 = tmp476 * tmp1 tmp498 = tmp496 + tmp497 tmp499 = tmp2 - tmp498 tmp500 = triton_helpers.maximum(tmp499, tmp17) tmp501 = libdevice.pow(tmp500, tmp19) tmp502 = tmp21 - tmp498 tmp503 = triton_helpers.maximum(tmp502, tmp17) tmp504 = libdevice.pow(tmp503, tmp19) tmp505 = tmp501 + tmp504 tmp506 = tmp26 - tmp498 tmp507 = triton_helpers.maximum(tmp506, tmp17) tmp508 = libdevice.pow(tmp507, tmp19) tmp509 = tmp505 + tmp508 tmp510 = tmp31 - tmp498 tmp511 = triton_helpers.maximum(tmp510, tmp17) tmp512 = libdevice.pow(tmp511, tmp19) tmp513 = tmp509 + tmp512 tmp514 = tmp513 - tmp10 tmp515 = tmp514 * tmp52 tmp516 = tmp515 >= tmp17 tmp517 = tl.where(tmp516, tmp498, tmp496) tmp518 = tmp497 * tmp1 tmp519 = tmp517 + tmp518 tmp520 = tmp2 - tmp519 tmp521 = triton_helpers.maximum(tmp520, tmp17) tmp522 = libdevice.pow(tmp521, tmp19) tmp523 = tmp21 - tmp519 tmp524 = triton_helpers.maximum(tmp523, tmp17) tmp525 = libdevice.pow(tmp524, tmp19) tmp526 = tmp522 + tmp525 tmp527 = tmp26 - tmp519 tmp528 = triton_helpers.maximum(tmp527, tmp17) tmp529 = 
libdevice.pow(tmp528, tmp19)
    tmp530 = tmp526 + tmp529
    tmp531 = tmp31 - tmp519
    tmp532 = triton_helpers.maximum(tmp531, tmp17)
    tmp533 = libdevice.pow(tmp532, tmp19)
    tmp534 = tmp530 + tmp533
    tmp535 = tmp534 - tmp10
    tmp536 = tmp535 * tmp52
    tmp537 = tmp536 >= tmp17
    tmp538 = tl.where(tmp537, tmp519, tmp517)
    tmp539 = tmp518 * tmp1
    tmp540 = tmp538 + tmp539
    tmp541 = tmp2 - tmp540
    tmp542 = triton_helpers.maximum(tmp541, tmp17)
    tmp543 = libdevice.pow(tmp542, tmp19)
    tmp544 = tmp21 - tmp540
    tmp545 = triton_helpers.maximum(tmp544, tmp17)
    tmp546 = libdevice.pow(tmp545, tmp19)
    tmp547 = tmp543 + tmp546
    tmp548 = tmp26 - tmp540
    tmp549 = triton_helpers.maximum(tmp548, tmp17)
    tmp550 = libdevice.pow(tmp549, tmp19)
    tmp551 = tmp547 + tmp550
    tmp552 = tmp31 - tmp540
    tmp553 = triton_helpers.maximum(tmp552, tmp17)
    tmp554 = libdevice.pow(tmp553, tmp19)
    tmp555 = tmp551 + tmp554
    tmp556 = tmp555 - tmp10
    tmp557 = tmp556 * tmp52
    tmp558 = tmp557 >= tmp17
    tmp559 = tl.where(tmp558, tmp540, tmp538)
    tmp560 = tmp539 * tmp1
    tmp561 = tmp559 + tmp560
    tmp562 = tmp2 - tmp561
    tmp563 = triton_helpers.maximum(tmp562, tmp17)
    tmp564 = libdevice.pow(tmp563, tmp19)
    tmp565 = tmp21 - tmp561
    tmp566 = triton_helpers.maximum(tmp565, tmp17)
    tmp567 = libdevice.pow(tmp566, tmp19)
    tmp568 = tmp564 + tmp567
    tmp569 = tmp26 - tmp561
    tmp570 = triton_helpers.maximum(tmp569, tmp17)
    tmp571 = libdevice.pow(tmp570, tmp19)
    tmp572 = tmp568 + tmp571
    tmp573 = tmp31 - tmp561
    tmp574 = triton_helpers.maximum(tmp573, tmp17)
    tmp575 = libdevice.pow(tmp574, tmp19)
    tmp576 = tmp572 + tmp575
    tmp577 = tmp576 - tmp10
    tmp578 = tmp577 * tmp52
    tmp579 = tmp578 >= tmp17
    tmp580 = tl.where(tmp579, tmp561, tmp559)
    tmp581 = tmp560 * tmp1
    tmp582 = tmp580 + tmp581
    tmp583 = tmp2 - tmp582
    tmp584 = triton_helpers.maximum(tmp583, tmp17)
    tmp585 = libdevice.pow(tmp584, tmp19)
    tmp586 = tmp21 - tmp582
    tmp587 = triton_helpers.maximum(tmp586, tmp17)
    tmp588 = libdevice.pow(tmp587, tmp19)
    tmp589 = tmp585 + tmp588
    tmp590 = tmp26 - tmp582
    tmp591 = triton_helpers.maximum(tmp590, tmp17)
    tmp592 = libdevice.pow(tmp591, tmp19)
    tmp593 = tmp589 + tmp592
    tmp594 = tmp31 - tmp582
    tmp595 = triton_helpers.maximum(tmp594, tmp17)
    tmp596 = libdevice.pow(tmp595, tmp19)
    tmp597 = tmp593 + tmp596
    tmp598 = tmp597 - tmp10
    tmp599 = tmp598 * tmp52
    tmp600 = tmp599 >= tmp17
    tmp601 = tl.where(tmp600, tmp582, tmp580)
    tmp602 = tmp581 * tmp1
    tmp603 = tmp601 + tmp602
    tmp604 = tmp2 - tmp603
    tmp605 = triton_helpers.maximum(tmp604, tmp17)
    tmp606 = libdevice.pow(tmp605, tmp19)
    tmp607 = tmp21 - tmp603
    tmp608 = triton_helpers.maximum(tmp607, tmp17)
    tmp609 = libdevice.pow(tmp608, tmp19)
    tmp610 = tmp606 + tmp609
    tmp611 = tmp26 - tmp603
    tmp612 = triton_helpers.maximum(tmp611, tmp17)
    tmp613 = libdevice.pow(tmp612, tmp19)
    tmp614 = tmp610 + tmp613
    tmp615 = tmp31 - tmp603
    tmp616 = triton_helpers.maximum(tmp615, tmp17)
    tmp617 = libdevice.pow(tmp616, tmp19)
    tmp618 = tmp614 + tmp617
    tl.store(out_ptr0 + (x0), tmp50, xmask)
    tl.store(out_ptr25 + (x0), tmp392, xmask)
    tl.store(in_out_ptr13 + (x0), tmp601, xmask)
    tl.store(out_ptr31 + (x0), tmp618, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/5g/c5gg5rdgeu7tpc776dxffa7phj6rp6b46itgrarkuq7dk34hyxwl.py
# Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, sub_94, clamp_29, truediv_29, p_m_28], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow]
# Source node to ATen node mapping:
#   X => mul
#   clamp_29 => clamp_min_29
#   dm_19 => div_18
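#
# --- Illustrative aside (not part of the generated module) ------------------
# The large fused kernel above inlines what appears to be roughly the first
# 27 halving steps of a bisection search for the threshold tau of an
# entmax-style transform with alpha = 1.5: with X = input * 0.5 it keeps a
# bracket [tau_lo, tau_hi], evaluates f(tau) = sum(clamp(X - tau, 0)**2) - 1,
# and keeps the half of the bracket on which f does not change sign (the
# repeated `>= tmp17` / `tl.where` pairs). The smaller kernels that follow
# advance the same bracket one or two halvings at a time. The sketch below is
# a hedged pure-PyTorch reference of that scheme, for reading along only; the
# name `_entmax15_bisect_reference` and the `n_iter` parameter are
# hypothetical, and nothing in the generated code calls this function.
def _entmax15_bisect_reference(x, n_iter=30):
    import torch
    X = x * 0.5                                   # (alpha - 1) * x for alpha = 1.5
    max_val = X.max(dim=-1, keepdim=True).values
    d = X.shape[-1]
    tau_lo = max_val - 1.0                        # f(tau_lo) >= 0
    tau_hi = max_val - (1.0 / d) ** 0.5           # f(tau_hi) <= 0
    f_lo = (torch.clamp(X - tau_lo, min=0) ** 2).sum(-1, keepdim=True) - 1
    dm = tau_hi - tau_lo
    tau_m = tau_lo
    for _ in range(n_iter):
        dm = dm / 2                               # dm_1, dm_2, ... in the graph
        tau_m = tau_lo + dm                       # tau_m_k = tau_lo_k + dm_k
        f_m = (torch.clamp(X - tau_m, min=0) ** 2).sum(-1, keepdim=True) - 1
        # same-sign test; mirrors the fused `(sum - 1) * f_lo >= 0` selects
        tau_lo = torch.where(f_m * f_lo >= 0, tau_m, tau_lo)
    p = torch.clamp(X - tau_m, min=0) ** 2
    return p / p.sum(-1, keepdim=True)            # final renormalization
# If desired, this reference can be compared on CPU against the compiled
# module's output for a random [4, 4, 4, 4] input; assuming the reading above
# is correct, the two should agree to float32 tolerance.
# --- end aside (the source-node mapping for the next kernel continues) ------
#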
# dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # p_m_28 => pow_32 # sub => full_default # sub_94 => sub_94 # tau_lo_28 => where_27 # tau_m_27 => add_27 # tau_m_28 => add_28 # truediv_29 => full_default_33 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {}) # %where_27 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_27, %add_27, %where_26), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %add_28 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_27, %div_28), kwargs = {}) # %sub_94 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_28), kwargs = {}) # %clamp_min_29 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_94, 0), kwargs = {}) # %full_default_33 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_32 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_29, %full_default_33), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_where_1 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_where_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_where_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_where_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp4 = 1.0
    tmp5 = tmp3 - tmp4
    tmp7 = tmp6 - tmp4
    tmp8 = tmp5 * tmp7
    tmp9 = 0.0
    tmp10 = tmp8 >= tmp9
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp11 + tmp22
    tmp24 = tl.where(tmp10, tmp23, tmp11)
    tmp25 = tmp22 * tmp1
    tmp26 = tmp24 + tmp25
    tmp27 = tmp2 - tmp26
    tmp28 = triton_helpers.maximum(tmp27, tmp9)
    tmp29 = 2.0
    tmp30 = libdevice.pow(tmp28, tmp29)
    tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/jp/cjpxzfoy56n2tuvk7vuc3bzbnbywmgduvlathrsklauv3qvvkwrq.py
# Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, tau_lo_29, dm_30, tau_m_29, sub_97, clamp_30, truediv_30, p_m_29, sum_31], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum]
# Source node to ATen node mapping:
#   X => mul
#   clamp_30 => clamp_min_30
#   dm_19 => div_18
#   dm_20 => div_19
#   dm_21 => div_20
#   dm_22 => div_21
#   dm_23 => div_22
#   dm_24 => div_23
#   dm_25 => div_24
#   dm_26 => div_25
#   dm_27 => div_26
#   dm_28 => div_27
#   dm_29 => div_28
#   dm_30 => div_29
#   p_m_29 => pow_33
#   sub => full_default
#   sub_97 => sub_97
#   sum_31 => sum_31
#   tau_lo_28 => where_27
#   tau_lo_29 => where_28
#   tau_m_27 => add_27
#   tau_m_28 => add_28
#   tau_m_29 => add_29
#   truediv_30 => full_default_34
# Graph fragment:
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
#   %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
#   %div_19 : [num_users=2] =
call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {}) # %where_27 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_27, %add_27, %where_26), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %add_28 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_27, %div_28), kwargs = {}) # %where_28 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_28, %add_28, %where_27), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {}) # %sub_97 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_29), kwargs = {}) # %clamp_min_30 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_97, 0), kwargs = {}) # %full_default_34 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_33 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_30, %full_default_34), kwargs = {}) # %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_33, [-1]), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (x0), xmask) tmp14 = tl.load(in_ptr2 + (x0), xmask) tmp18 = tl.load(in_out_ptr0 + (x0), xmask) tmp19 = tl.load(in_ptr3 + (x0), xmask) tmp36 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last') tmp44 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp50 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp15 = tmp14 - tmp7 tmp16 = tmp15 * tmp10 tmp17 = tmp16 >= tmp12 tmp20 = 0.5 tmp21 = tmp19 * tmp20 tmp22 = tmp21 * tmp20 tmp23 = tmp22 * tmp20 tmp24 = tmp23 * tmp20 tmp25 = tmp24 * tmp20 tmp26 = tmp25 * tmp20 tmp27 = tmp26 * tmp20 tmp28 = tmp27 * tmp20 tmp29 = tmp28 * tmp20 tmp30 = tmp29 * tmp20 tmp31 = tmp18 + tmp30 tmp32 = tl.where(tmp17, tmp31, tmp18) tmp33 = tmp30 * tmp20 tmp34 = tmp32 + tmp33 tmp35 = tl.where(tmp13, tmp34, tmp32) tmp37 = tmp36 * tmp20 tmp38 = tmp33 * tmp20 tmp39 = tmp35 + tmp38 tmp40 = tmp37 - tmp39 tmp41 = triton_helpers.maximum(tmp40, tmp12) tmp42 = 2.0 tmp43 = libdevice.pow(tmp41, tmp42) tmp45 = tmp44 * tmp20 tmp46 = tmp45 - tmp39 tmp47 = triton_helpers.maximum(tmp46, tmp12) tmp48 = libdevice.pow(tmp47, tmp42) tmp49 = tmp43 + tmp48 tmp51 = tmp50 * tmp20 tmp52 = tmp51 - tmp39 tmp53 = triton_helpers.maximum(tmp52, tmp12) tmp54 = libdevice.pow(tmp53, tmp42) tmp55 = tmp49 + tmp54 tmp57 = tmp56 * tmp20 tmp58 = tmp57 - tmp39 tmp59 = triton_helpers.maximum(tmp58, tmp12) tmp60 = libdevice.pow(tmp59, tmp42) tmp61 = tmp55 + tmp60 tl.store(in_out_ptr0 + (x0), tmp35, xmask) tl.store(out_ptr0 + (x0), tmp61, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/iw/ciwggqrcdwaqzmajreydq3u7k6uebrg57qfoyrlc52e2h2idp5aw.py # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, sub_100, clamp_31], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp] # Source node to ATen node mapping: # X => mul # clamp_31 => clamp_min_31 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # 
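# ----------------------------------------------------------------------------
# Hedged reconstruction of the fusion above: the kernel reduces the previous
# probe in-register (tmp0..tmp6), applies two pending tau_lo updates, then
# evaluates the row-sum at the next midpoint, writing tau_lo in place
# (in_out_ptr0) and the new sum (out_ptr0). Here f_lo stands for f at the
# initial lower bound (sum_1 - 1 earlier in this graph); names are assumed.
def _sketch_two_updates_then_sum(x, tau_lo, dm, p_prev, s_older, f_lo):
    s_prev = p_prev.sum(-1, keepdim=True)
    tau_lo = torch.where((s_older - 1) * f_lo >= 0, tau_lo + dm, tau_lo)
    dm = dm / 2
    tau_lo = torch.where((s_prev - 1) * f_lo >= 0, tau_lo + dm, tau_lo)
    tau_m = tau_lo + dm / 2
    s_next = (torch.clamp(x * 0.5 - tau_m, min=0) ** 2).sum(-1, keepdim=True)
    return tau_lo, s_next
# ----------------------------------------------------------------------------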
dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # sub => full_default # sub_100 => sub_100 # tau_lo_30 => where_29 # tau_m_29 => add_29 # tau_m_30 => add_30 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {}) # %where_29 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_29, %add_29, %where_28), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_29, %div_30), kwargs = {}) # %sub_100 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_30), kwargs = {}) # %clamp_min_31 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_100, 0), kwargs = {}) triton_poi_fused_add_clamp_div_mul_sub_where_3 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_sub_where_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_sub_where_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_sub_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp11 + tmp24 tmp26 = tl.where(tmp10, tmp25, tmp11) tmp27 = tmp24 * tmp1 tmp28 = tmp26 + tmp27 tmp29 = tmp2 - tmp28 tmp30 = triton_helpers.maximum(tmp29, tmp9) tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yz/cyz7433eqb643zfbrbwd2jyhb3e23ohxyph5ulaajtas6nchptdl.py # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, tau_lo_31, dm_32, tau_m_31, sub_103, clamp_32, truediv_32, p_m_31, sum_33], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] # Source node to ATen node mapping: # X => mul # clamp_32 => clamp_min_32 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # p_m_31 => pow_35 # sub => full_default # sub_103 => sub_103 # sum_33 => sum_33 # tau_lo_30 => where_29 # tau_lo_31 => where_30 # tau_m_29 => add_29 # tau_m_30 => add_30 # tau_m_31 => add_31 # truediv_32 => full_default_36 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), 
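# ----------------------------------------------------------------------------
# Sketch of the staging step above (assumed names): this kernel stores only
# the clamped residual, deferring the squaring and the row reduction to the
# next fused kernel, which looks like an Inductor scheduling choice rather
# than a change in the math.
def _sketch_staged_clamp(x, tau_lo, dm, sign_test):
    tau_lo = torch.where(sign_test, tau_lo + dm, tau_lo)    # tau_lo_30
    return torch.clamp(x * 0.5 - (tau_lo + dm / 2), min=0)  # clamp_31
# ----------------------------------------------------------------------------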
kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {}) # %where_29 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_29, %add_29, %where_28), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_29, %div_30), kwargs = {}) # %where_30 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_30, %add_30, %where_29), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %add_31 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_30, %div_31), kwargs = {}) # %sub_103 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_31), kwargs = {}) # %clamp_min_32 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_103, 0), kwargs = {}) # %full_default_36 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_35 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_32, %full_default_36), kwargs = {}) # %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_35, [-1]), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (x0), xmask) tmp19 = tl.load(in_ptr2 + (x0), xmask) tmp23 = tl.load(in_out_ptr0 + (x0), xmask) tmp24 = tl.load(in_ptr3 + (x0), xmask) tmp43 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last') tmp50 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp62 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 2.0 tmp2 = libdevice.pow(tmp0, tmp1) tmp4 = libdevice.pow(tmp3, tmp1) tmp5 = tmp2 + tmp4 tmp7 = libdevice.pow(tmp6, tmp1) tmp8 = tmp5 + tmp7 tmp10 = libdevice.pow(tmp9, tmp1) tmp11 = tmp8 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp15 = tmp14 - tmp12 tmp16 = tmp13 * tmp15 tmp17 = 0.0 tmp18 = tmp16 >= tmp17 tmp20 = tmp19 - tmp12 tmp21 = tmp20 * tmp15 tmp22 = tmp21 >= tmp17 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp23 + tmp37 tmp39 = tl.where(tmp22, tmp38, tmp23) tmp40 = tmp37 * tmp25 tmp41 = tmp39 + tmp40 tmp42 = tl.where(tmp18, tmp41, tmp39) tmp44 = tmp43 * tmp25 tmp45 = tmp40 * tmp25 tmp46 = tmp42 + tmp45 tmp47 = tmp44 - tmp46 tmp48 = triton_helpers.maximum(tmp47, tmp17) tmp49 = libdevice.pow(tmp48, tmp1) tmp51 = tmp50 * tmp25 tmp52 = tmp51 - tmp46 tmp53 = triton_helpers.maximum(tmp52, tmp17) tmp54 = libdevice.pow(tmp53, tmp1) tmp55 = tmp49 + tmp54 tmp57 = tmp56 * tmp25 tmp58 = tmp57 - tmp46 tmp59 = triton_helpers.maximum(tmp58, tmp17) tmp60 = libdevice.pow(tmp59, tmp1) tmp61 = tmp55 + tmp60 tmp63 = tmp62 * tmp25 tmp64 = tmp63 - tmp46 tmp65 = triton_helpers.maximum(tmp64, tmp17) tmp66 = libdevice.pow(tmp65, tmp1) tmp67 = tmp61 + tmp66 tl.store(in_out_ptr0 + (x0), tmp42, xmask) tl.store(out_ptr0 + (x0), tmp67, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uv/cuv6bbjndzleisyotoon6kfbscubsd67zkk7i2i55u76f5axryo6.py # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, tau_m_31, tau_lo_32, dm_33, tau_m_32, sub_106], Original ATen: [aten.sub, aten.mul, aten.div, 
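# ----------------------------------------------------------------------------
# Taken together, kernels _3 and _4 evaluate one f(tau_m): kernel _3 stages
# r = clamp(X - tau_m, 0) per element, and kernel _4 reduces r ** 2 per row
# in-register (tmp2..tmp11 above) before the next pair of tau_lo updates.
# A hedged composition of the two:
def _sketch_f(x, tau_m):
    r = torch.clamp(x * 0.5 - tau_m, min=0)         # staged by kernel _3
    return (r ** 2).sum(-1, keepdim=True) - 1.0     # reduced by kernel _4
# ----------------------------------------------------------------------------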
aten.add, aten.where] # Source node to ATen node mapping: # X => mul # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # sub => full_default # sub_106 => sub_106 # tau_lo_32 => where_31 # tau_m_31 => add_31 # tau_m_32 => add_32 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %add_31 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_30, %div_31), kwargs = {}) # %where_31 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_31, %add_31, %where_30), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %add_32 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_31, %div_32), kwargs = {}) # %sub_106 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_32), kwargs = {}) triton_poi_fused_add_div_mul_sub_where_5 = async_compile.triton('triton_poi_fused_add_div_mul_sub_where_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 
5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_where_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_where_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp11 + tmp26 tmp28 = tl.where(tmp10, tmp27, tmp11) tmp29 = tmp26 * tmp1 tmp30 = tmp28 + tmp29 tmp31 = tmp2 - tmp30 tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mx/cmxqfvf4zkaolzisiu4gmfyz5rcdip3u2idnyzvql5k4kesoio3u.py # Topologically Sorted Source Nodes: [sub, X, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, tau_m_31, tau_lo_32, dm_33, tau_m_32, tau_lo_33, dm_34, tau_m_33, sub_109, clamp_34, truediv_34, p_m_33, sum_35, tau_lo_34, dm_35, tau_m_34, sub_112, clamp_35, truediv_35, p_m_34, sum_36, f_m_34, mul_36, tau_lo_35, dm_36, tau_m_35, sub_115, clamp_36, truediv_36, p_m_35, sum_37, tau_lo_36, dm_37, tau_m_36, sub_118, clamp_37, truediv_37, p_m_36, sum_38, tau_lo_37], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] # Source node to ATen node mapping: # X => mul # clamp_34 => clamp_min_34 # clamp_35 => clamp_min_35 # clamp_36 => clamp_min_36 # clamp_37 => clamp_min_37 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # f_lo => sub_8 # f_m_34 => sub_114 # mul_36 => mul_72 # p_m_33 => pow_37 # p_m_34 => pow_38 # p_m_35 => pow_39 # p_m_36 => pow_40 # sub => full_default # sub_109 => sub_109 # sub_112 
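# ----------------------------------------------------------------------------
# Variant staging (assumed names): here even the clamp is deferred. The
# kernel stores the raw difference X - tau_m (sub_106), and the downstream
# kernel applies max(., 0) before squaring (tmp2/tmp4 in kernel _6 below).
def _sketch_staged_diff(x, tau_lo, dm, sign_test):
    tau_lo = torch.where(sign_test, tau_lo + dm, tau_lo)  # tau_lo_32
    return x * 0.5 - (tau_lo + dm / 2)                    # sub_106
# ----------------------------------------------------------------------------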
=> sub_112 # sub_115 => sub_115 # sub_118 => sub_118 # sum_35 => sum_35 # sum_36 => sum_36 # sum_37 => sum_37 # sum_38 => sum_38 # tau_lo_32 => where_31 # tau_lo_33 => where_32 # tau_lo_34 => where_33 # tau_lo_35 => where_34 # tau_lo_36 => where_35 # tau_lo_37 => where_36 # tau_m_31 => add_31 # tau_m_32 => add_32 # tau_m_33 => add_33 # tau_m_34 => add_34 # tau_m_35 => add_35 # tau_m_36 => add_36 # truediv_34 => full_default_38 # truediv_35 => full_default_39 # truediv_36 => full_default_40 # truediv_37 => full_default_41 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %sub_8 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %add_31 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_30, %div_31), kwargs = {}) # %where_31 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_31, %add_31, %where_30), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %add_32 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_31, %div_32), kwargs = {}) # %where_32 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_32, %add_32, %where_31), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %add_33 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_32, %div_33), kwargs = {}) # %sub_109 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_33), kwargs = {}) # %clamp_min_34 : [num_users=1] = 
call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_109, 0), kwargs = {}) # %full_default_38 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_37 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_34, %full_default_38), kwargs = {}) # %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_37, [-1]), kwargs = {}) # %where_33 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_33, %add_33, %where_32), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_33, %div_34), kwargs = {}) # %sub_112 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_34), kwargs = {}) # %clamp_min_35 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_112, 0), kwargs = {}) # %full_default_39 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_38 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_35, %full_default_39), kwargs = {}) # %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_38, [-1]), kwargs = {}) # %sub_114 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_36, 1), kwargs = {}) # %mul_72 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_114, %sub_8), kwargs = {}) # %where_34 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_34, %add_34, %where_33), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %add_35 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_34, %div_35), kwargs = {}) # %sub_115 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_35), kwargs = {}) # %clamp_min_36 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_115, 0), kwargs = {}) # %full_default_40 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_39 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_36, %full_default_40), kwargs = {}) # %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_39, [-1]), kwargs = {}) # %where_35 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_35, %add_35, %where_34), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %add_36 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_35, %div_36), kwargs = {}) # %sub_118 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_36), kwargs = {}) # %clamp_min_37 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_118, 0), kwargs = {}) # %full_default_41 : [num_users=1] = 
call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_40 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_37, %full_default_41), kwargs = {}) # %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_40, [-1]), kwargs = {}) # %where_36 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_36, %add_36, %where_35), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6(in_out_ptr0, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (x0), xmask) tmp23 = tl.load(in_ptr2 + (x0), xmask) tmp27 = tl.load(in_out_ptr0 + (x0), xmask) tmp28 = tl.load(in_ptr3 + (x0), xmask) tmp49 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp62 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp68 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 
+ tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp24 = tmp23 - tmp17 tmp25 = tmp24 * tmp20 tmp26 = tmp25 >= tmp1 tmp29 = 0.5 tmp30 = tmp28 * tmp29 tmp31 = tmp30 * tmp29 tmp32 = tmp31 * tmp29 tmp33 = tmp32 * tmp29 tmp34 = tmp33 * tmp29 tmp35 = tmp34 * tmp29 tmp36 = tmp35 * tmp29 tmp37 = tmp36 * tmp29 tmp38 = tmp37 * tmp29 tmp39 = tmp38 * tmp29 tmp40 = tmp39 * tmp29 tmp41 = tmp40 * tmp29 tmp42 = tmp41 * tmp29 tmp43 = tmp42 * tmp29 tmp44 = tmp27 + tmp43 tmp45 = tl.where(tmp26, tmp44, tmp27) tmp46 = tmp43 * tmp29 tmp47 = tmp45 + tmp46 tmp48 = tl.where(tmp22, tmp47, tmp45) tmp50 = tmp49 * tmp29 tmp51 = tmp46 * tmp29 tmp52 = tmp48 + tmp51 tmp53 = tmp50 - tmp52 tmp54 = triton_helpers.maximum(tmp53, tmp1) tmp55 = libdevice.pow(tmp54, tmp3) tmp57 = tmp56 * tmp29 tmp58 = tmp57 - tmp52 tmp59 = triton_helpers.maximum(tmp58, tmp1) tmp60 = libdevice.pow(tmp59, tmp3) tmp61 = tmp55 + tmp60 tmp63 = tmp62 * tmp29 tmp64 = tmp63 - tmp52 tmp65 = triton_helpers.maximum(tmp64, tmp1) tmp66 = libdevice.pow(tmp65, tmp3) tmp67 = tmp61 + tmp66 tmp69 = tmp68 * tmp29 tmp70 = tmp69 - tmp52 tmp71 = triton_helpers.maximum(tmp70, tmp1) tmp72 = libdevice.pow(tmp71, tmp3) tmp73 = tmp67 + tmp72 tmp74 = tmp73 - tmp17 tmp75 = tmp74 * tmp20 tmp76 = tmp75 >= tmp1 tmp77 = tl.where(tmp76, tmp52, tmp48) tmp78 = tmp51 * tmp29 tmp79 = tmp77 + tmp78 tmp80 = tmp50 - tmp79 tmp81 = triton_helpers.maximum(tmp80, tmp1) tmp82 = libdevice.pow(tmp81, tmp3) tmp83 = tmp57 - tmp79 tmp84 = triton_helpers.maximum(tmp83, tmp1) tmp85 = libdevice.pow(tmp84, tmp3) tmp86 = tmp82 + tmp85 tmp87 = tmp63 - tmp79 tmp88 = triton_helpers.maximum(tmp87, tmp1) tmp89 = libdevice.pow(tmp88, tmp3) tmp90 = tmp86 + tmp89 tmp91 = tmp69 - tmp79 tmp92 = triton_helpers.maximum(tmp91, tmp1) tmp93 = libdevice.pow(tmp92, tmp3) tmp94 = tmp90 + tmp93 tmp95 = tmp94 - tmp17 tmp96 = tmp95 * tmp20 tmp97 = tmp96 >= tmp1 tmp98 = tl.where(tmp97, tmp79, tmp77) tmp99 = tmp78 * tmp29 tmp100 = tmp98 + tmp99 tmp101 = tmp50 - tmp100 tmp102 = triton_helpers.maximum(tmp101, tmp1) tmp103 = libdevice.pow(tmp102, tmp3) tmp104 = tmp57 - tmp100 tmp105 = triton_helpers.maximum(tmp104, tmp1) tmp106 = libdevice.pow(tmp105, tmp3) tmp107 = tmp103 + tmp106 tmp108 = tmp63 - tmp100 tmp109 = triton_helpers.maximum(tmp108, tmp1) tmp110 = libdevice.pow(tmp109, tmp3) tmp111 = tmp107 + tmp110 tmp112 = tmp69 - tmp100 tmp113 = triton_helpers.maximum(tmp112, tmp1) tmp114 = libdevice.pow(tmp113, tmp3) tmp115 = tmp111 + tmp114 tmp116 = tmp115 - tmp17 tmp117 = tmp116 * tmp20 tmp118 = tmp117 >= tmp1 tmp119 = tl.where(tmp118, tmp100, tmp98) tmp120 = tmp99 * tmp29 tmp121 = tmp119 + tmp120 tmp122 = tmp50 - tmp121 tmp123 = triton_helpers.maximum(tmp122, tmp1) tmp124 = libdevice.pow(tmp123, tmp3) tmp125 = tmp57 - tmp121 tmp126 = triton_helpers.maximum(tmp125, tmp1) tmp127 = libdevice.pow(tmp126, tmp3) tmp128 = tmp124 + tmp127 tmp129 = tmp63 - tmp121 tmp130 = triton_helpers.maximum(tmp129, tmp1) tmp131 = libdevice.pow(tmp130, tmp3) tmp132 = tmp128 + tmp131 tmp133 = tmp69 - tmp121 tmp134 = triton_helpers.maximum(tmp133, tmp1) tmp135 = libdevice.pow(tmp134, tmp3) tmp136 = tmp132 + tmp135 tmp137 = tmp136 - tmp17 tmp138 = tmp137 * tmp20 tmp139 = tmp138 >= tmp1 tmp140 = tl.where(tmp139, tmp121, tmp119) tl.store(in_out_ptr3 + (x0), tmp140, xmask) ''', device_str='cuda') # kernel path: 
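# ----------------------------------------------------------------------------
# The large fused kernel above first applies two pending tau_lo updates
# (tmp45/tmp48) and then chains four full bisection iterations entirely in
# registers (tau_lo_33 .. tau_lo_37, the tmp52/tmp79/tmp100/tmp121
# midpoints). A hedged PyTorch equivalent of that inner loop, with f_lo
# again meaning f at the initial lower bound:
def _sketch_bisect_iters(x, tau_lo, dm, f_lo, n_iters=4):
    for _ in range(n_iters):
        dm = dm / 2
        tau_m = tau_lo + dm
        f_m = (torch.clamp(x * 0.5 - tau_m, min=0) ** 2).sum(-1, keepdim=True) - 1
        # keep the midpoint whenever f(tau_m) has the same sign as f(tau_lo_0)
        tau_lo = torch.where(f_m * f_lo >= 0, tau_m, tau_lo)
    return tau_lo
# ----------------------------------------------------------------------------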
runs/run_shard_0/inductor_cache/qw/cqwjirql6abn7imghsni2jlkuybk4xkokodof2rpil4tssyex4uw.py # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, sub_121, clamp_38, truediv_38, p_m_37], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow] # Source node to ATen node mapping: # X => mul # clamp_38 => clamp_min_38 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # p_m_37 => pow_41 # sub => full_default # sub_121 => sub_121 # tau_m_37 => add_37 # truediv_38 => full_default_42 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %add_37 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%where_36, %div_37), kwargs = {}) # %sub_121 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_37), kwargs = {}) # %clamp_min_38 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_121, 0), kwargs = {}) # %full_default_42 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_41 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_38, %full_default_42), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_7 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp3 + tmp24 tmp26 = tmp2 - tmp25 tmp27 = 0.0 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = 2.0 tmp30 = libdevice.pow(tmp28, tmp29) tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3q/c3qnhz5l3skrhqr43bpfxlmkw6kumz7pxqtyenon3vddp3cudnfx.py # Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, 
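# ----------------------------------------------------------------------------
# Note on the long "tmpK = tmpJ * 0.5" chains: each kernel rebuilds the
# current bracket width from the base width (div_17 in the graph) rather
# than loading the running value, so after k halvings dm equals
# base * 0.5 ** k. Hedged closed form:
def _sketch_dm(dm_base, k):
    return dm_base * 0.5 ** k   # k = 20 reproduces tmp5..tmp24 in kernel _7
# ----------------------------------------------------------------------------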
dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, tau_lo_38], Original ATen: [aten.div, aten.add, aten.where] # Source node to ATen node mapping: # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # tau_lo_38 => where_37 # tau_m_37 => add_37 # Graph fragment: # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %add_37 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_36, %div_37), kwargs = {}) # %where_37 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_37, %add_37, %where_36), kwargs = {}) triton_poi_fused_add_div_where_8 = async_compile.triton('triton_poi_fused_add_div_where_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_where_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (x0), xmask) tmp14 = tl.load(in_out_ptr0 + (x0), xmask) tmp15 = tl.load(in_ptr2 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = tmp17 * tmp16 tmp19 = tmp18 * tmp16 tmp20 = tmp19 * tmp16 tmp21 = tmp20 * tmp16 tmp22 = tmp21 * tmp16 tmp23 = tmp22 * tmp16 tmp24 = tmp23 * tmp16 tmp25 = tmp24 * tmp16 tmp26 = tmp25 * tmp16 tmp27 = tmp26 * tmp16 tmp28 = tmp27 * tmp16 tmp29 = tmp28 * tmp16 tmp30 = tmp29 * tmp16 tmp31 = tmp30 * tmp16 tmp32 = tmp31 * tmp16 tmp33 = tmp32 * tmp16 tmp34 = tmp33 * tmp16 tmp35 = tmp34 * tmp16 tmp36 = tmp35 * tmp16 tmp37 = tmp14 + tmp36 tmp38 = tl.where(tmp13, tmp37, tmp14) tl.store(in_out_ptr0 + (x0), tmp38, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/65/c65c6rqbfpxnnf52lttfmjffp6c57rq5vh5zishetax476bzxbyn.py # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, sub_124, clamp_39, truediv_39, p_m_38], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow] # Source node to ATen node mapping: # X => mul # clamp_39 => clamp_min_39 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # dm_39 => div_38 # p_m_38 => pow_42 # sub => full_default # sub_124 => sub_124 # tau_m_38 => add_38 # truediv_39 => full_default_43 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), 
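# ----------------------------------------------------------------------------
# Kernel _8 above is the update half of a probe/update pair: it reduces the
# stored probe p_m_37 (in_ptr0), compares the result against f_lo, and
# advances the per-row threshold in place. Hedged equivalent (names assumed):
def _sketch_advance(tau_lo, p_m, f_lo, dm):
    f_m = p_m.sum(-1, keepdim=True) - 1
    return torch.where(f_m * f_lo >= 0, tau_lo + dm, tau_lo)  # tau_lo_38
# ----------------------------------------------------------------------------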
kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {}) # %add_38 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_37, %div_38), kwargs = {}) # %sub_124 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_38), kwargs = {}) # %clamp_min_39 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_124, 0), kwargs = {}) # %full_default_43 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_42 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_39, %full_default_43), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_9 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers 
import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp3 + tmp25 tmp27 = tmp2 - tmp26 tmp28 = 0.0 tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = 2.0 tmp31 = libdevice.pow(tmp29, tmp30) tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hc/chcgwhxqzahejjnkomdddn3fpfvhgpal4i42dvbnrcw5fjtsuuip.py # Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, tau_lo_39], Original ATen: [aten.div, aten.add, aten.where] # Source node to ATen node mapping: # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # dm_39 => div_38 # tau_lo_39 => where_38 # tau_m_38 => add_38 # Graph fragment: # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {}) # %add_38 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_37, %div_38), kwargs = {}) # %where_38 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_38, %add_38, %where_37), kwargs = {}) triton_poi_fused_add_div_where_10 = async_compile.triton('triton_poi_fused_add_div_where_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
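# 'mutated_arg_names' above flags in_out_ptr0, the running tau_lo buffer that
# this kernel updates in place on each unrolled bisection step; the remaining
# inductor_meta entries are autotuning and caching hints recorded by Inductor.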
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_where_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (x0), xmask) tmp14 = tl.load(in_out_ptr0 + (x0), xmask) tmp15 = tl.load(in_ptr2 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = tmp17 * tmp16 tmp19 = tmp18 * tmp16 tmp20 = tmp19 * tmp16 tmp21 = tmp20 * tmp16 tmp22 = tmp21 * tmp16 tmp23 = tmp22 * tmp16 tmp24 = tmp23 * tmp16 tmp25 = tmp24 * tmp16 tmp26 = tmp25 * tmp16 tmp27 = tmp26 * tmp16 tmp28 = tmp27 * tmp16 tmp29 = tmp28 * tmp16 tmp30 = tmp29 * tmp16 tmp31 = tmp30 * tmp16 tmp32 = tmp31 * tmp16 tmp33 = tmp32 * tmp16 tmp34 = tmp33 * tmp16 tmp35 = tmp34 * tmp16 tmp36 = tmp35 * tmp16 tmp37 = tmp36 * tmp16 tmp38 = tmp14 + tmp37 tmp39 = tl.where(tmp13, tmp38, tmp14) tl.store(in_out_ptr0 + (x0), tmp39, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hk/chkgkmcorveaoty2aubj4aiqs7c4icysi6i7uztapg7qoahqwomz.py # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, sub_127, clamp_40], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp] # Source node to ATen node mapping: # X => mul # clamp_40 => clamp_min_40 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # dm_39 => div_38 # dm_40 => div_39 # sub => full_default # sub_127 => sub_127 # tau_m_39 => add_39 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = 
{}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {}) # %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {}) # %add_39 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_38, %div_39), kwargs = {}) # %sub_127 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_39), kwargs = {}) # %clamp_min_40 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_127, 0), kwargs = {}) triton_poi_fused_add_clamp_div_mul_sub_11 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_sub_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_sub_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_sub_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp3 + tmp26 tmp28 = tmp2 - tmp27 tmp29 = 0.0 tmp30 = triton_helpers.maximum(tmp28, tmp29) tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6j/c6jx7jobgsfa3dnpqdh4py4h2yjwqcnyfrnfxf4y2pamiu6cwdpl.py # Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, tau_lo_40], Original ATen: [aten.div, aten.add, aten.where] # Source node to ATen node mapping: # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # dm_39 => div_38 # dm_40 => div_39 # tau_lo_40 => where_39 # tau_m_39 => add_39 # Graph fragment: # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %add_39 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_38, %div_39), kwargs = {})
# %where_39 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_39, %add_39, %where_38), kwargs = {})
triton_poi_fused_add_div_where_12 = async_compile.triton('triton_poi_fused_add_div_where_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_where_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (x0), xmask)
    tmp19 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp20 = tl.load(in_ptr2 + (x0), xmask)
    tmp1 = 2.0
    tmp2 = libdevice.pow(tmp0, tmp1)
    tmp4 = libdevice.pow(tmp3, tmp1)
    tmp5 = tmp2 + tmp4
    tmp7 = libdevice.pow(tmp6, tmp1)
    tmp8 = tmp5 + tmp7
    tmp10 = libdevice.pow(tmp9, tmp1)
    tmp11 = tmp8 + tmp10
    tmp12 = 1.0
    tmp13 = tmp11 - tmp12
    tmp15 = tmp14 - tmp12
    tmp16 = tmp13 * tmp15
    tmp17 = 0.0
    tmp18 = tmp16 >= tmp17
    tmp21 = 0.5
    tmp22 = tmp20 * tmp21
    tmp23 = tmp22 * tmp21
    tmp24 = tmp23 * tmp21
    tmp25 = tmp24 * tmp21
    tmp26 = tmp25 * tmp21
    tmp27 = tmp26 * tmp21
    tmp28 = tmp27 * tmp21
    tmp29 = tmp28 * tmp21
    tmp30 = tmp29 * tmp21
    tmp31 = tmp30 * tmp21
    tmp32 = tmp31 * tmp21
    tmp33 = tmp32 * tmp21
    tmp34 = tmp33 * tmp21
    tmp35 = tmp34 * tmp21
    tmp36 = tmp35 * tmp21
    tmp37 = tmp36 * tmp21
    tmp38 = tmp37 * tmp21
    tmp39 = tmp38 * tmp21
    tmp40 = tmp39 * tmp21
    tmp41 = tmp40 * tmp21
    tmp42 = tmp41 * tmp21
    tmp43 = tmp42 * tmp21
    tmp44 = tmp19 + tmp43
    tmp45 = tl.where(tmp18, tmp44, tmp19)
    tl.store(in_out_ptr0 + (x0), tmp45, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/dg/cdgzbjixzgjbzoonwf327zqettjzik6jiaut4vfckxwzzrjpw4ci.py
# Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_130, clamp_41], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp]
# Source node to ATen node mapping:
# X => mul
# clamp_41 => clamp_min_41
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# sub => full_default
# sub_130 => sub_130
# tau_m_40 => add_40
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 :
[num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {}) # %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {}) # %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {}) # %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {}) # %sub_130 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_40), kwargs = {}) # %clamp_min_41 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_130, 0), kwargs = {}) triton_poi_fused_add_clamp_div_mul_sub_13 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_sub_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_sub_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_sub_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 
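# The long run of `* 0.5` statements above and below is Inductor unrolling the
# bisection step-size chain dm_k = dm_{k-1} / 2 (graph nodes %div_18..%div_40)
# into straight-line code rather than a loop. The kernel then forms
# tau_m = tau_lo + dm and clamps X - tau_m at zero. A rough eager-mode sketch
# of what this pointwise kernel computes (hypothetical names; here
# X = in_ptr0 * 0.5, tau_lo = in_ptr1, dm0 = in_ptr2):
#   dm = dm0 * 0.5 ** 23
#   tau_m = tau_lo + dm
#   out = torch.clamp(X - tau_m, min=0)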
tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp3 + tmp27 tmp29 = tmp2 - tmp28 tmp30 = 0.0 tmp31 = triton_helpers.maximum(tmp29, tmp30) tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/f2/cf2smd7msnlfnwkswpdfa2pro4t25uk7k6sqqtuhugmitppzjw2h.py # Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, tau_lo_41], Original ATen: [aten.div, aten.add, aten.where] # Source node to ATen node mapping: # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # dm_39 => div_38 # dm_40 => div_39 # dm_41 => div_40 # tau_lo_41 => where_40 # tau_m_40 => add_40 # Graph fragment: # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {}) # %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {}) # %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {}) # %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {}) # %where_40 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_40, %add_40, %where_39), kwargs = {}) triton_poi_fused_add_div_where_14 = async_compile.triton('triton_poi_fused_add_div_where_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_where_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (x0), xmask) tmp19 = tl.load(in_out_ptr0 + (x0), xmask) tmp20 = tl.load(in_ptr2 + (x0), xmask) tmp1 = 2.0 tmp2 = libdevice.pow(tmp0, tmp1) tmp4 = libdevice.pow(tmp3, tmp1) tmp5 = tmp2 + tmp4 tmp7 = libdevice.pow(tmp6, tmp1) tmp8 = tmp5 + tmp7 tmp10 = libdevice.pow(tmp9, tmp1) tmp11 = tmp8 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp15 = tmp14 - tmp12 tmp16 = tmp13 * tmp15 tmp17 = 0.0 tmp18 = tmp16 >= tmp17 tmp21 = 0.5 tmp22 = tmp20 * tmp21 tmp23 = tmp22 * tmp21 tmp24 = tmp23 * tmp21 tmp25 = tmp24 * tmp21 tmp26 = tmp25 * tmp21 tmp27 = tmp26 * tmp21 tmp28 = tmp27 * tmp21 tmp29 = tmp28 * tmp21 tmp30 = tmp29 * tmp21 tmp31 = tmp30 * tmp21 tmp32 = tmp31 * tmp21 tmp33 = tmp32 * tmp21 tmp34 = tmp33 * tmp21 tmp35 = tmp34 * tmp21 tmp36 = tmp35 * tmp21 tmp37 = tmp36 * tmp21 tmp38 = tmp37 * 
tmp21 tmp39 = tmp38 * tmp21 tmp40 = tmp39 * tmp21 tmp41 = tmp40 * tmp21 tmp42 = tmp41 * tmp21 tmp43 = tmp42 * tmp21 tmp44 = tmp43 * tmp21 tmp45 = tmp19 + tmp44 tmp46 = tl.where(tmp18, tmp45, tmp19) tl.store(in_out_ptr0 + (x0), tmp46, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/m6/cm6g7ftzm55nml3oy3bwbqmue7bg6inetry6yvug7w4kb6ev5rty.py # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sub_133], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] # Source node to ATen node mapping: # X => mul # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # dm_39 => div_38 # dm_40 => div_39 # dm_41 => div_40 # dm_42 => div_41 # sub => full_default # sub_133 => sub_133 # tau_m_41 => add_41 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {}) # %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {}) # %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {}) # %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {}) # %add_41 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_40, %div_41), kwargs = {}) # %sub_133 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_41), kwargs = {}) triton_poi_fused_add_div_mul_sub_15 = async_compile.triton('triton_poi_fused_add_div_mul_sub_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp27 * tmp1 tmp29 = tmp3 + tmp28 tmp30 = tmp2 - tmp29 tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/it/citgidtovhh7ieg5w6kgdaztr74d5qimxq5xchpfcretvgwraeaz.py # Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, tau_lo_42], Original ATen: [aten.div, aten.add, aten.where] # Source node to ATen node mapping: # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # dm_39 => div_38 # dm_40 => div_39 # dm_41 => div_40 # dm_42 => div_41 # tau_lo_42 => where_41 # tau_m_41 => add_41 # Graph fragment: # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {}) # %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {}) # %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {}) # %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {}) # %add_41 : 
[num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_40, %div_41), kwargs = {}) # %where_41 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_41, %add_41, %where_40), kwargs = {}) triton_poi_fused_add_div_where_16 = async_compile.triton('triton_poi_fused_add_div_where_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_where_16(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (x0), xmask) tmp23 = tl.load(in_out_ptr0 + (x0), xmask) tmp24 = tl.load(in_ptr2 + (x0), xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp37 * tmp25 tmp39 = tmp38 * tmp25 tmp40 = tmp39 * tmp25 tmp41 = tmp40 * tmp25 tmp42 = tmp41 * tmp25 tmp43 = tmp42 * tmp25 tmp44 = tmp43 * tmp25 tmp45 = tmp44 * tmp25 tmp46 = tmp45 * tmp25 tmp47 = tmp46 * tmp25 tmp48 = tmp47 * tmp25 tmp49 = tmp48 * tmp25 tmp50 = tmp23 + tmp49 tmp51 = tl.where(tmp22, tmp50, tmp23) tl.store(in_out_ptr0 + 
(x0), tmp51, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ow/cowrvlpr3aq5lvglsdero5kvdkcaqmnxh7ylqy6wg5asbxkwbugf.py # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, sub_136], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] # Source node to ATen node mapping: # X => mul # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # dm_39 => div_38 # dm_40 => div_39 # dm_41 => div_40 # dm_42 => div_41 # dm_43 => div_42 # sub => full_default # sub_136 => sub_136 # tau_m_42 => add_42 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # 
%div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {}) # %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {}) # %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {}) # %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {}) # %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {}) # %add_42 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_41, %div_42), kwargs = {}) # %sub_136 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_42), kwargs = {}) triton_poi_fused_add_div_mul_sub_17 = async_compile.triton('triton_poi_fused_add_div_mul_sub_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp27 * tmp1 tmp29 = tmp28 * tmp1 tmp30 = tmp3 + tmp29 tmp31 = tmp2 - tmp30 tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/z4/cz4ddiop67cjhvnymyeyxeouq7iyjrkpudsvnur2jqxca35zlu35.py # Topologically Sorted Source Nodes: [dm_19, dm_20, 
dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, tau_lo_43], Original ATen: [aten.div, aten.add, aten.where] # Source node to ATen node mapping: # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # dm_39 => div_38 # dm_40 => div_39 # dm_41 => div_40 # dm_42 => div_41 # dm_43 => div_42 # tau_lo_43 => where_42 # tau_m_42 => add_42 # Graph fragment: # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {}) # %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {}) # %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {}) # %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {}) # %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {}) # %add_42 : 
[num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_41, %div_42), kwargs = {}) # %where_42 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_42, %add_42, %where_41), kwargs = {}) triton_poi_fused_add_div_where_18 = async_compile.triton('triton_poi_fused_add_div_where_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_where_18(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (x0), xmask) tmp23 = tl.load(in_out_ptr0 + (x0), xmask) tmp24 = tl.load(in_ptr2 + (x0), xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp37 * tmp25 tmp39 = tmp38 * tmp25 tmp40 = tmp39 * tmp25 tmp41 = tmp40 * tmp25 tmp42 = tmp41 * tmp25 tmp43 = tmp42 * tmp25 tmp44 = tmp43 * tmp25 tmp45 = tmp44 * tmp25 tmp46 = tmp45 * tmp25 tmp47 = tmp46 * tmp25 tmp48 = tmp47 * tmp25 tmp49 = tmp48 * tmp25 tmp50 = tmp49 * tmp25 tmp51 = tmp23 + tmp50 tmp52 = tl.where(tmp22, tmp51, tmp23) 
tl.store(in_out_ptr0 + (x0), tmp52, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xp/cxphrrfdmolvfd4qixifhmbpdrlsdb6np4qopmkccumu7xs7u2vv.py # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, sub_139], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] # Source node to ATen node mapping: # X => mul # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # dm_39 => div_38 # dm_40 => div_39 # dm_41 => div_40 # dm_42 => div_41 # dm_43 => div_42 # dm_44 => div_43 # sub => full_default # sub_139 => sub_139 # tau_m_43 => add_43 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {}) # %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {}) # %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {}) # %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {}) # %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {}) # %div_43 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_42, 2), kwargs = {}) # %add_43 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_42, %div_43), kwargs = {}) # %sub_139 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_43), kwargs = {}) triton_poi_fused_add_div_mul_sub_19 = async_compile.triton('triton_poi_fused_add_div_mul_sub_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp27 * tmp1 tmp29 = tmp28 * tmp1 tmp30 = tmp29 * tmp1 tmp31 = tmp3 + tmp30 tmp32 = tmp2 - tmp31 
tl.store(out_ptr0 + (x2), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zm/czmonnbfj2wl7xmhu3q3o4tfezemuxon2fwrwwbnaeehze54zmfo.py # Topologically Sorted Source Nodes: [sub, X, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, tau_lo_44, dm_45, tau_m_44, sub_142, clamp_45, truediv_45, p_m_44, sum_46, f_m_44, mul_46, tau_lo_45, dm_46, tau_m_45, sub_145, clamp_46, truediv_46, p_m_45, sum_47, f_m_45, mul_47, tau_lo_46, dm_47, tau_m_46, sub_148, clamp_47, truediv_47, p_m_46, sum_48, f_m_46, tau_lo_47, dm_48, tau_m_47, sub_151, clamp_48, truediv_48, p_m_47, sum_49, tau_lo_48, dm_49, tau_m_48, sub_154, clamp_49, truediv_49, p_m_48, sum_50], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] # Source node to ATen node mapping: # X => mul # clamp_45 => clamp_min_45 # clamp_46 => clamp_min_46 # clamp_47 => clamp_min_47 # clamp_48 => clamp_min_48 # clamp_49 => clamp_min_49 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # dm_38 => div_37 # dm_39 => div_38 # dm_40 => div_39 # dm_41 => div_40 # dm_42 => div_41 # dm_43 => div_42 # dm_44 => div_43 # dm_45 => div_44 # dm_46 => div_45 # dm_47 => div_46 # dm_48 => div_47 # dm_49 => div_48 # f_lo => sub_8 # f_m_44 => sub_144 # f_m_45 => sub_147 # f_m_46 => sub_150 # mul_46 => mul_92 # mul_47 => mul_94 # p_m_44 => pow_48 # p_m_45 => pow_49 # p_m_46 => pow_50 # p_m_47 => pow_51 # p_m_48 => pow_52 # sub => full_default # sub_142 => sub_142 # sub_145 => sub_145 # sub_148 => sub_148 # sub_151 => sub_151 # sub_154 => sub_154 # sum_46 => sum_46 # sum_47 => sum_47 # sum_48 => sum_48 # sum_49 => sum_49 # sum_50 => sum_50 # tau_lo_44 => where_43 # tau_lo_45 => where_44 # tau_lo_46 => where_45 # tau_lo_47 => where_46 # tau_lo_48 => where_47 # tau_m_43 => add_43 # tau_m_44 => add_44 # tau_m_45 => add_45 # tau_m_46 => add_46 # tau_m_47 => add_47 # tau_m_48 => add_48 # truediv_45 => full_default_49 # truediv_46 => full_default_50 # truediv_47 => full_default_51 # truediv_48 => full_default_52 # truediv_49 => full_default_53 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %sub_8 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {}) # %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {}) # %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {}) # %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {}) # %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {}) # %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {}) # %div_43 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_42, 2), kwargs = {}) # %add_43 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_42, %div_43), kwargs = {}) # %where_43 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_43, %add_43, %where_42), kwargs = {}) # %div_44 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_43, 2), kwargs = {}) # %add_44 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_43, %div_44), kwargs = {}) # %sub_142 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_44), kwargs = {}) # %clamp_min_45 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_142, 0), kwargs = {}) # %full_default_49 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_48 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_45, %full_default_49), kwargs = {}) # %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_48, [-1]), kwargs = {}) # %sub_144 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = 
(%sum_46, 1), kwargs = {}) # %mul_92 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_144, %sub_8), kwargs = {}) # %where_44 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_44, %add_44, %where_43), kwargs = {}) # %div_45 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_44, 2), kwargs = {}) # %add_45 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_44, %div_45), kwargs = {}) # %sub_145 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_45), kwargs = {}) # %clamp_min_46 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_145, 0), kwargs = {}) # %full_default_50 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_49 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_46, %full_default_50), kwargs = {}) # %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_49, [-1]), kwargs = {}) # %sub_147 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_47, 1), kwargs = {}) # %mul_94 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_147, %sub_8), kwargs = {}) # %where_45 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_45, %add_45, %where_44), kwargs = {}) # %div_46 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_45, 2), kwargs = {}) # %add_46 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_45, %div_46), kwargs = {}) # %sub_148 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_46), kwargs = {}) # %clamp_min_47 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_148, 0), kwargs = {}) # %full_default_51 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_50 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_47, %full_default_51), kwargs = {}) # %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_50, [-1]), kwargs = {}) # %sub_150 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_48, 1), kwargs = {}) # %where_46 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_46, %add_46, %where_45), kwargs = {}) # %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {}) # %add_47 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_46, %div_47), kwargs = {}) # %sub_151 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_47), kwargs = {}) # %clamp_min_48 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_151, 0), kwargs = {}) # %full_default_52 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_51 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_48, %full_default_52), kwargs = {}) # 
%sum_49 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_51, [-1]), kwargs = {}) # %where_47 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_47, %add_47, %where_46), kwargs = {}) # %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {}) # %add_48 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_47, %div_48), kwargs = {}) # %sub_154 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_48), kwargs = {}) # %clamp_min_49 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_154, 0), kwargs = {}) # %full_default_53 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_52 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_49, %full_default_53), kwargs = {}) # %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_52, [-1]), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr2'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20(in_out_ptr0, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr3, out_ptr5, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (x0), xmask) tmp23 = tl.load(in_out_ptr0 + (x0), xmask) tmp24 = 
tl.load(in_ptr2 + (x0), xmask) tmp56 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last') tmp61 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp67 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp73 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp37 * tmp25 tmp39 = tmp38 * tmp25 tmp40 = tmp39 * tmp25 tmp41 = tmp40 * tmp25 tmp42 = tmp41 * tmp25 tmp43 = tmp42 * tmp25 tmp44 = tmp43 * tmp25 tmp45 = tmp44 * tmp25 tmp46 = tmp45 * tmp25 tmp47 = tmp46 * tmp25 tmp48 = tmp47 * tmp25 tmp49 = tmp48 * tmp25 tmp50 = tmp49 * tmp25 tmp51 = tmp50 * tmp25 tmp52 = tmp23 + tmp51 tmp53 = tl.where(tmp22, tmp52, tmp23) tmp54 = tmp51 * tmp25 tmp55 = tmp53 + tmp54 tmp57 = tmp56 * tmp25 tmp58 = tmp57 - tmp55 tmp59 = triton_helpers.maximum(tmp58, tmp1) tmp60 = libdevice.pow(tmp59, tmp3) tmp62 = tmp61 * tmp25 tmp63 = tmp62 - tmp55 tmp64 = triton_helpers.maximum(tmp63, tmp1) tmp65 = libdevice.pow(tmp64, tmp3) tmp66 = tmp60 + tmp65 tmp68 = tmp67 * tmp25 tmp69 = tmp68 - tmp55 tmp70 = triton_helpers.maximum(tmp69, tmp1) tmp71 = libdevice.pow(tmp70, tmp3) tmp72 = tmp66 + tmp71 tmp74 = tmp73 * tmp25 tmp75 = tmp74 - tmp55 tmp76 = triton_helpers.maximum(tmp75, tmp1) tmp77 = libdevice.pow(tmp76, tmp3) tmp78 = tmp72 + tmp77 tmp79 = tmp78 - tmp17 tmp80 = tmp79 * tmp20 tmp81 = tmp80 >= tmp1 tmp82 = tl.where(tmp81, tmp55, tmp53) tmp83 = tmp54 * tmp25 tmp84 = tmp82 + tmp83 tmp85 = tmp57 - tmp84 tmp86 = triton_helpers.maximum(tmp85, tmp1) tmp87 = libdevice.pow(tmp86, tmp3) tmp88 = tmp62 - tmp84 tmp89 = triton_helpers.maximum(tmp88, tmp1) tmp90 = libdevice.pow(tmp89, tmp3) tmp91 = tmp87 + tmp90 tmp92 = tmp68 - tmp84 tmp93 = triton_helpers.maximum(tmp92, tmp1) tmp94 = libdevice.pow(tmp93, tmp3) tmp95 = tmp91 + tmp94 tmp96 = tmp74 - tmp84 tmp97 = triton_helpers.maximum(tmp96, tmp1) tmp98 = libdevice.pow(tmp97, tmp3) tmp99 = tmp95 + tmp98 tmp100 = tmp99 - tmp17 tmp101 = tmp100 * tmp20 tmp102 = tmp101 >= tmp1 tmp103 = tl.where(tmp102, tmp84, tmp82) tmp104 = tmp83 * tmp25 tmp105 = tmp103 + tmp104 tmp106 = tmp57 - tmp105 tmp107 = triton_helpers.maximum(tmp106, tmp1) tmp108 = libdevice.pow(tmp107, tmp3) tmp109 = tmp62 - tmp105 tmp110 = triton_helpers.maximum(tmp109, tmp1) tmp111 = libdevice.pow(tmp110, tmp3) tmp112 = tmp108 + tmp111 tmp113 = tmp68 - tmp105 tmp114 = triton_helpers.maximum(tmp113, tmp1) tmp115 = libdevice.pow(tmp114, tmp3) tmp116 = tmp112 + tmp115 tmp117 = tmp74 - tmp105 tmp118 = triton_helpers.maximum(tmp117, tmp1) tmp119 = libdevice.pow(tmp118, tmp3) tmp120 = tmp116 + tmp119 tmp121 = tmp120 - tmp17 tmp122 = tmp121 * tmp20 tmp123 = tmp122 >= tmp1 tmp124 = tl.where(tmp123, tmp105, tmp103) tmp125 = tmp104 * tmp25 tmp126 = tmp124 + tmp125 tmp127 = tmp57 - tmp126 tmp128 = 
triton_helpers.maximum(tmp127, tmp1) tmp129 = libdevice.pow(tmp128, tmp3) tmp130 = tmp62 - tmp126 tmp131 = triton_helpers.maximum(tmp130, tmp1) tmp132 = libdevice.pow(tmp131, tmp3) tmp133 = tmp129 + tmp132 tmp134 = tmp68 - tmp126 tmp135 = triton_helpers.maximum(tmp134, tmp1) tmp136 = libdevice.pow(tmp135, tmp3) tmp137 = tmp133 + tmp136 tmp138 = tmp74 - tmp126 tmp139 = triton_helpers.maximum(tmp138, tmp1) tmp140 = libdevice.pow(tmp139, tmp3) tmp141 = tmp137 + tmp140 tmp142 = tmp141 - tmp17 tmp143 = tmp142 * tmp20 tmp144 = tmp143 >= tmp1 tmp145 = tl.where(tmp144, tmp126, tmp124) tmp146 = tmp125 * tmp25 tmp147 = tmp145 + tmp146 tmp148 = tmp57 - tmp147 tmp149 = triton_helpers.maximum(tmp148, tmp1) tmp150 = libdevice.pow(tmp149, tmp3) tmp151 = tmp62 - tmp147 tmp152 = triton_helpers.maximum(tmp151, tmp1) tmp153 = libdevice.pow(tmp152, tmp3) tmp154 = tmp150 + tmp153 tmp155 = tmp68 - tmp147 tmp156 = triton_helpers.maximum(tmp155, tmp1) tmp157 = libdevice.pow(tmp156, tmp3) tmp158 = tmp154 + tmp157 tmp159 = tmp74 - tmp147 tmp160 = triton_helpers.maximum(tmp159, tmp1) tmp161 = libdevice.pow(tmp160, tmp3) tmp162 = tmp158 + tmp161 tl.store(out_ptr3 + (x0), tmp104, xmask) tl.store(in_out_ptr2 + (x0), tmp145, xmask) tl.store(out_ptr5 + (x0), tmp162, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5g/c5gebprtdgvvqtipwon2cm42v3mfxpaj2ewcnnpyam53vh5neggv.py # Topologically Sorted Source Nodes: [sub, X, dm_48, dm_49, tau_m_48, tau_lo_49, dm_50, tau_m_49, sub_157, clamp_50, truediv_50, p_m_49], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow] # Source node to ATen node mapping: # X => mul # clamp_50 => clamp_min_50 # dm_48 => div_47 # dm_49 => div_48 # dm_50 => div_49 # p_m_49 => pow_53 # sub => full_default # sub_157 => sub_157 # tau_lo_49 => where_48 # tau_m_48 => add_48 # tau_m_49 => add_49 # truediv_50 => full_default_54 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=51] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {}) # %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {}) # %add_48 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_47, %div_48), kwargs = {}) # %where_48 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_48, %add_48, %where_47), kwargs = {}) # %div_49 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_48, 2), kwargs = {}) # %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_48, %div_49), kwargs = {}) # %sub_157 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_49), kwargs = {}) # %clamp_min_50 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_157, 0), kwargs = {}) # %full_default_54 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_53 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_50, %full_default_54), kwargs = {}) 
triton_poi_fused_add_clamp_div_mul_pow_sub_where_21 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_where_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_where_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_where_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp11 + tmp14 tmp16 = tl.where(tmp10, tmp15, tmp11) tmp17 = tmp14 * tmp1 tmp18 = tmp16 + tmp17 tmp19 = tmp2 - tmp18 tmp20 = triton_helpers.maximum(tmp19, tmp9) tmp21 = 2.0 tmp22 = libdevice.pow(tmp20, tmp21) tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rg/crg5ev6hqq4wiy6i5dbidecwhxksw45gdajpljla4pfgiiihufu3.py # Topologically Sorted Source Nodes: [p_m_50], Original ATen: [aten.div] # Source node to ATen node mapping: # p_m_50 => div_50 # Graph fragment: # %div_50 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_53, %unsqueeze_50), kwargs = {}) triton_poi_fused_div_22 = async_compile.triton('triton_poi_fused_div_22', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_22(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf43 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf57 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf58 = reinterpret_tensor(buf57, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf57 # reuse buf59 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [max_1, sub, X, sub_1, max_val_1, pow_2, tau_hi, pow_1, tau_lo, dm, dm_1, tau_m, sub_10, clamp_1, truediv_1, p_m, sum_2, sub_6, clamp, truediv, pow_3, sum_1, f_lo, tau_lo_1, dm_2, tau_m_1, sub_13, clamp_2, truediv_2, p_m_1, sum_3, f_m_1, mul_3, tau_lo_2, dm_3, tau_m_2, sub_16, clamp_3, truediv_3, p_m_2, sum_4, f_m_2, mul_4, tau_lo_3, dm_4, tau_m_3, sub_19, clamp_4, truediv_4, p_m_3, sum_5, f_m_3, mul_5, tau_lo_4, dm_5, tau_m_4, sub_22, clamp_5, truediv_5, p_m_4, sum_6, f_m_4, mul_6, tau_lo_5, dm_6, tau_m_5, sub_25, clamp_6, truediv_6, p_m_5, sum_7, f_m_5, mul_7, tau_lo_6, dm_7, tau_m_6, sub_28, clamp_7, truediv_7, p_m_6, sum_8, f_m_6, mul_8, tau_lo_7, dm_8, tau_m_7, sub_31, clamp_8, truediv_8, p_m_7, sum_9, f_m_7, mul_9, tau_lo_8, dm_9, tau_m_8, sub_34, clamp_9, truediv_9, p_m_8, sum_10, f_m_8, mul_10, tau_lo_9, dm_10, tau_m_9, sub_37, clamp_10, truediv_10, p_m_9, sum_11, f_m_9, mul_11, tau_lo_10, dm_11, tau_m_10, sub_40, clamp_11, truediv_11, p_m_10, sum_12, f_m_10, mul_12, tau_lo_11, dm_12, tau_m_11, sub_43, clamp_12, truediv_12, p_m_11, sum_13, f_m_11, mul_13, tau_lo_12, dm_13, tau_m_12, sub_46, clamp_13, truediv_13, p_m_12, sum_14, f_m_12, mul_14, tau_lo_13, dm_14, tau_m_13, sub_49, clamp_14, truediv_14, p_m_13, sum_15, f_m_13, mul_15, tau_lo_14, dm_15, tau_m_14, sub_52, 
clamp_15, truediv_15, p_m_14, sum_16, f_m_14, mul_16, tau_lo_15, dm_16, tau_m_15, sub_55, clamp_16, truediv_16, p_m_15, sum_17, f_m_15, mul_17, tau_lo_16, dm_17, tau_m_16, sub_58, clamp_17, truediv_17, p_m_16, sum_18, f_m_16, mul_18, tau_lo_17, dm_18, tau_m_17, sub_61, clamp_18, truediv_18, p_m_17, sum_19, f_m_17, tau_lo_18, dm_19, tau_m_18, sub_64, clamp_19, truediv_19, p_m_18, sum_20, tau_lo_19, dm_20, tau_m_19, sub_67, clamp_20, truediv_20, p_m_19, sum_21, tau_lo_20, dm_21, tau_m_20, sub_70, clamp_21, truediv_21, p_m_20, sum_22, tau_lo_21, dm_22, tau_m_21, sub_73, clamp_22, truediv_22, p_m_21, sum_23, tau_lo_22, dm_23, tau_m_22, sub_76, clamp_23, truediv_23, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_79, clamp_24, truediv_24, p_m_23, sum_25, tau_lo_24, dm_25, tau_m_24, sub_82, clamp_25, truediv_25, p_m_24, sum_26, tau_lo_25, dm_26, tau_m_25, sub_85, clamp_26, truediv_26, p_m_25, sum_27, tau_lo_26, dm_27, tau_m_26, sub_88, clamp_27, truediv_27, p_m_26, sum_28, tau_lo_27, dm_28, tau_m_27, sub_91, clamp_28, truediv_28, p_m_27, sum_29], Original ATen: [aten.max, aten.sub, aten.mul, aten.pow, aten.div, aten.add, aten.clamp, aten.sum, aten.where] stream0 = get_raw_stream(0) triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0.run(buf58, arg0_1, buf1, buf43, buf59, 64, grid=grid(64), stream=stream0) buf60 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, sub_94, clamp_29, truediv_29, p_m_28], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow] triton_poi_fused_add_clamp_div_mul_pow_sub_where_1.run(arg0_1, buf59, buf1, buf58, buf43, buf60, 256, grid=grid(256), stream=stream0) buf61 = buf58; del buf58 # reuse buf62 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, tau_lo_29, dm_30, tau_m_29, sub_97, clamp_30, truediv_30, p_m_29, sum_31], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2.run(buf61, buf60, buf1, buf59, buf43, arg0_1, buf62, 64, grid=grid(64), stream=stream0) buf63 = buf60; del buf60 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, sub_100, clamp_31], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp] triton_poi_fused_add_clamp_div_mul_sub_where_3.run(arg0_1, buf62, buf1, buf61, buf43, buf63, 256, grid=grid(256), stream=stream0) buf64 = buf61; del buf61 # reuse buf65 = buf59; del buf59 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, tau_lo_31, dm_32, tau_m_31, sub_103, clamp_32, truediv_32, p_m_31, sum_33], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4.run(buf64, buf63, buf1, buf62, buf43, arg0_1, buf65, 64, grid=grid(64), stream=stream0) buf66 = buf63; del buf63 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, 
tau_m_31, tau_lo_32, dm_33, tau_m_32, sub_106], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where] triton_poi_fused_add_div_mul_sub_where_5.run(arg0_1, buf65, buf1, buf64, buf43, buf66, 256, grid=grid(256), stream=stream0) buf67 = buf64; del buf64 # reuse buf74 = buf62; del buf62 # reuse buf75 = reinterpret_tensor(buf74, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf74 # reuse # Topologically Sorted Source Nodes: [sub, X, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, tau_m_31, tau_lo_32, dm_33, tau_m_32, tau_lo_33, dm_34, tau_m_33, sub_109, clamp_34, truediv_34, p_m_33, sum_35, tau_lo_34, dm_35, tau_m_34, sub_112, clamp_35, truediv_35, p_m_34, sum_36, f_m_34, mul_36, tau_lo_35, dm_36, tau_m_35, sub_115, clamp_36, truediv_36, p_m_35, sum_37, tau_lo_36, dm_37, tau_m_36, sub_118, clamp_37, truediv_37, p_m_36, sum_38, tau_lo_37], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6.run(buf67, buf75, buf66, buf1, buf65, buf43, arg0_1, 64, grid=grid(64), stream=stream0) buf76 = buf66; del buf66 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, sub_121, clamp_38, truediv_38, p_m_37], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow] triton_poi_fused_add_clamp_div_mul_pow_sub_7.run(arg0_1, buf75, buf43, buf76, 256, grid=grid(256), stream=stream0) buf77 = buf75; del buf75 # reuse # Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, tau_lo_38], Original ATen: [aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_8.run(buf77, buf76, buf1, buf43, 64, grid=grid(64), stream=stream0) buf78 = buf76; del buf76 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, sub_124, clamp_39, truediv_39, p_m_38], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow] triton_poi_fused_add_clamp_div_mul_pow_sub_9.run(arg0_1, buf77, buf43, buf78, 256, grid=grid(256), stream=stream0) buf79 = buf77; del buf77 # reuse # Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, tau_lo_39], Original ATen: [aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_10.run(buf79, buf78, buf1, buf43, 64, grid=grid(64), stream=stream0) buf80 = buf78; del buf78 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, sub_127, clamp_40], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp] triton_poi_fused_add_clamp_div_mul_sub_11.run(arg0_1, buf79, buf43, buf80, 256, grid=grid(256), stream=stream0) buf81 = buf79; del buf79 # reuse # Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, tau_lo_40], Original ATen: 
[aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_12.run(buf81, buf80, buf1, buf43, 64, grid=grid(64), stream=stream0) buf82 = buf80; del buf80 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_130, clamp_41], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp] triton_poi_fused_add_clamp_div_mul_sub_13.run(arg0_1, buf81, buf43, buf82, 256, grid=grid(256), stream=stream0) buf83 = buf81; del buf81 # reuse # Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, tau_lo_41], Original ATen: [aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_14.run(buf83, buf82, buf1, buf43, 64, grid=grid(64), stream=stream0) buf84 = buf82; del buf82 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sub_133], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] triton_poi_fused_add_div_mul_sub_15.run(arg0_1, buf83, buf43, buf84, 256, grid=grid(256), stream=stream0) buf85 = buf83; del buf83 # reuse # Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, tau_lo_42], Original ATen: [aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_16.run(buf85, buf84, buf1, buf43, 64, grid=grid(64), stream=stream0) buf86 = buf84; del buf84 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, sub_136], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] triton_poi_fused_add_div_mul_sub_17.run(arg0_1, buf85, buf43, buf86, 256, grid=grid(256), stream=stream0) buf87 = buf85; del buf85 # reuse # Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, tau_lo_43], Original ATen: [aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_18.run(buf87, buf86, buf1, buf43, 64, grid=grid(64), stream=stream0) buf88 = buf86; del buf86 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, sub_139], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] triton_poi_fused_add_div_mul_sub_19.run(arg0_1, buf87, buf43, buf88, 256, grid=grid(256), stream=stream0) buf89 = buf87; del buf87 # reuse buf95 = buf67; del buf67 # reuse buf97 = buf65; del buf65 # reuse buf98 = reinterpret_tensor(buf97, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf97 # reuse buf99 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, X, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, 
dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, tau_lo_44, dm_45, tau_m_44, sub_142, clamp_45, truediv_45, p_m_44, sum_46, f_m_44, mul_46, tau_lo_45, dm_46, tau_m_45, sub_145, clamp_46, truediv_46, p_m_45, sum_47, f_m_45, mul_47, tau_lo_46, dm_47, tau_m_46, sub_148, clamp_47, truediv_47, p_m_46, sum_48, f_m_46, tau_lo_47, dm_48, tau_m_47, sub_151, clamp_48, truediv_48, p_m_47, sum_49, tau_lo_48, dm_49, tau_m_48, sub_154, clamp_49, truediv_49, p_m_48, sum_50], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20.run(buf89, buf98, buf88, buf1, buf43, arg0_1, buf95, buf99, 64, grid=grid(64), stream=stream0) del buf43 del buf89 buf100 = buf88; del buf88 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_48, dm_49, tau_m_48, tau_lo_49, dm_50, tau_m_49, sub_157, clamp_50, truediv_50, p_m_49], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow] triton_poi_fused_add_clamp_div_mul_pow_sub_where_21.run(arg0_1, buf99, buf1, buf98, buf95, buf100, 256, grid=grid(256), stream=stream0) del arg0_1 del buf1 del buf95 del buf98 del buf99 buf101 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p_m_50], Original ATen: [aten.div] triton_poi_fused_div_22.run(buf100, buf101, 256, grid=grid(256), stream=stream0) del buf100 return (buf101, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
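# ---------------------------------------------------------------------------
# Orientation note (added): the Inductor output above is the n_iter=50
# bisection loop of entmax_bisect (traced with alpha=1.5), fully unrolled.
# Each dm_k halves the search interval (dm /= 2), tau_m = tau_lo + dm is the
# midpoint, the fused clamp/pow/sum chains evaluate
#     f(tau_m) = sum(clamp(0.5 * X - tau_m, min=0) ** 2) - 1,
# and tl.where advances tau_lo wherever f(tau_m) * f(tau_lo) >= 0. The
# reference PyTorch implementation that produced this graph follows.
# ---------------------------------------------------------------------------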
from torch.autograd import Function
import torch
import torch.nn as nn


def entmax_bisect(X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
    """alpha-entmax: normalizing sparse transform (a la softmax).

    Solves the optimization problem:

        max_p <x, p> - H_a(p)    s.t.    p >= 0, sum(p) == 1.

    where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1, using a
    bisection (root finding, binary search) algorithm.

    This function is differentiable with respect to both X and alpha.

    Parameters
    ----------
    X : torch.Tensor
        The input tensor.

    alpha : float or torch.Tensor
        Tensor of alpha parameters (> 1) to use. If a scalar or Python float,
        the same value is used for all rows; otherwise, it must have shape
        (or be expandable to)

            alpha.shape[j] == (X.shape[j] if j != dim else 1)

        A value of alpha=2 corresponds to sparsemax, and alpha=1 would in
        theory recover softmax. For numeric reasons, this algorithm does not
        work with `alpha=1`; if you want softmax, we recommend
        `torch.nn.functional.softmax`.

    dim : int
        The dimension along which to apply alpha-entmax.

    n_iter : int
        Number of bisection iterations. For float32, 24 iterations should
        suffice for machine precision.

    ensure_sum_one : bool
        Whether to divide the result by its sum. If false, the result might
        sum to close to but not exactly 1, which might cause downstream
        problems.

    Returns
    -------
    P : torch.Tensor, same shape as X
        The projection result, such that P.sum(dim=dim) == 1 elementwise.
    """
    return EntmaxBisectFunction.apply(X, alpha, dim, n_iter, ensure_sum_one)


class EntmaxBisectFunction(Function):

    @classmethod
    def _gp(cls, x, alpha):
        return x ** (alpha - 1)

    @classmethod
    def _gp_inv(cls, y, alpha):
        return y ** (1 / (alpha - 1))

    @classmethod
    def _p(cls, X, alpha):
        return cls._gp_inv(torch.clamp(X, min=0), alpha)

    @classmethod
    def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50,
            ensure_sum_one=True):
        if not isinstance(alpha, torch.Tensor):
            alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
        alpha_shape = list(X.shape)
        alpha_shape[dim] = 1
        alpha = alpha.expand(*alpha_shape)
        ctx.alpha = alpha
        ctx.dim = dim
        d = X.shape[dim]
        max_val, _ = X.max(dim=dim, keepdim=True)
        # Work in the transformed domain: for alpha=1.5 this scales X by 0.5.
        X = X * (alpha - 1)
        max_val = max_val * (alpha - 1)
        # The initial bracket [tau_lo, tau_hi] is guaranteed to contain the
        # root of f(tau) = p(X - tau).sum(dim) - 1.
        tau_lo = max_val - cls._gp(1, alpha)
        tau_hi = max_val - cls._gp(1 / d, alpha)
        f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
        dm = tau_hi - tau_lo
        for it in range(n_iter):
            dm /= 2
            tau_m = tau_lo + dm
            p_m = cls._p(X - tau_m, alpha)
            f_m = p_m.sum(dim) - 1
            # Keep the half-interval on which f changes sign.
            mask = (f_m * f_lo >= 0).unsqueeze(dim)
            tau_lo = torch.where(mask, tau_m, tau_lo)
        if ensure_sum_one:
            p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
        ctx.save_for_backward(p_m)
        return p_m

    @classmethod
    def backward(cls, ctx, dY):
        Y, = ctx.saved_tensors
        gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
        dX = dY * gppr
        q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
        q = q.unsqueeze(ctx.dim)
        dX -= q * gppr
        d_alpha = None
        if ctx.needs_input_grad[1]:
            S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
            ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
            Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
            d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
            d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
            d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
        return dX, d_alpha, None, None, None


class EntmaxBisect(nn.Module):

    def __init__(self, alpha=1.5, dim=-1, n_iter=50):
        """alpha-entmax: normalizing sparse map (a la softmax) via bisection.

        Solves the optimization problem:

            max_p <x, p> - H_a(p)    s.t.    p >= 0, sum(p) == 1.
        where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
        using a bisection (root finding, binary search) algorithm.

        Parameters
        ----------
        alpha : float or torch.Tensor
            Tensor of alpha parameters (> 1) to use. If a scalar or Python
            float, the same value is used for all rows; otherwise, it must
            have shape (or be expandable to)

                alpha.shape[j] == (X.shape[j] if j != dim else 1)

            A value of alpha=2 corresponds to sparsemax, and alpha=1 would in
            theory recover softmax. For numeric reasons, this algorithm does
            not work with `alpha=1`; if you want softmax, we recommend
            `torch.nn.functional.softmax`.

        dim : int
            The dimension along which to apply alpha-entmax.

        n_iter : int
            Number of bisection iterations. For float32, 24 iterations should
            suffice for machine precision.
        """
        super().__init__()  # initialize nn.Module state before setting attributes
        self.dim = dim
        self.n_iter = n_iter
        self.alpha = alpha

    def forward(self, X):
        return entmax_bisect(X, alpha=self.alpha, dim=self.dim,
            n_iter=self.n_iter)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
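# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; `_demo_entmax_bisect` is not part of
# the original module). It exercises entmax_bisect as defined above: every
# row of the output is a valid probability distribution, and unlike softmax
# the transform can assign exact zeros; larger alpha typically gives sparser
# outputs (alpha=2 is sparsemax).
# ---------------------------------------------------------------------------
def _demo_entmax_bisect():
    torch.manual_seed(0)
    x = torch.randn(2, 5)
    p15 = entmax_bisect(x, alpha=1.5, dim=-1)
    p20 = entmax_bisect(x, alpha=2.0, dim=-1)
    # ensure_sum_one=True (the default) renormalizes, so rows sum to 1.
    assert torch.allclose(p15.sum(dim=-1), torch.ones(2))
    assert torch.allclose(p20.sum(dim=-1), torch.ones(2))
    # Exact zeros come from clamp(X - tau, min=0) inside the bisection.
    print("alpha=1.5 zeros per row:", (p15 == 0).sum(dim=-1).tolist())
    print("alpha=2.0 zeros per row:", (p20 == 0).sum(dim=-1).tolist())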
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0(in_out_ptr13, in_ptr0, out_ptr0, out_ptr25, out_ptr31, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = triton_helpers.maximum(tmp0, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8 * tmp1 tmp10 = 1.0 tmp11 = tmp9 - tmp10 tmp12 = tmp9 - tmp1 tmp13 = tmp12 - tmp11 tmp14 = tmp13 * tmp1 tmp15 = tmp11 + tmp14 tmp16 = tmp2 - tmp15 tmp17 = 0.0 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = 2.0 tmp20 = libdevice.pow(tmp18, tmp19) tmp21 = tmp3 * tmp1 tmp22 = tmp21 - tmp15 tmp23 = triton_helpers.maximum(tmp22, tmp17) tmp24 = libdevice.pow(tmp23, tmp19) tmp25 = tmp20 + tmp24 tmp26 = tmp5 * tmp1 tmp27 = tmp26 - tmp15 tmp28 = triton_helpers.maximum(tmp27, tmp17) tmp29 = libdevice.pow(tmp28, tmp19) tmp30 = tmp25 + tmp29 tmp31 = tmp7 * tmp1 tmp32 = tmp31 - tmp15 tmp33 = triton_helpers.maximum(tmp32, tmp17) tmp34 = libdevice.pow(tmp33, tmp19) tmp35 = tmp30 + tmp34 tmp36 = tmp2 - tmp11 tmp37 = triton_helpers.maximum(tmp36, tmp17) tmp38 = libdevice.pow(tmp37, tmp19) tmp39 = tmp21 - tmp11 tmp40 = triton_helpers.maximum(tmp39, tmp17) tmp41 = libdevice.pow(tmp40, tmp19) tmp42 = tmp38 + tmp41 tmp43 = tmp26 - tmp11 tmp44 = triton_helpers.maximum(tmp43, tmp17) tmp45 = libdevice.pow(tmp44, tmp19) tmp46 = tmp42 + tmp45 tmp47 = tmp31 - tmp11 tmp48 = triton_helpers.maximum(tmp47, tmp17) tmp49 = libdevice.pow(tmp48, tmp19) tmp50 = tmp46 + tmp49 tmp51 = tmp35 - tmp10 tmp52 = tmp50 - tmp10 tmp53 = tmp51 * tmp52 tmp54 = tmp53 >= tmp17 tmp55 = tl.where(tmp54, tmp15, tmp11) tmp56 = tmp14 * tmp1 tmp57 = tmp55 + tmp56 tmp58 = tmp2 - tmp57 tmp59 = triton_helpers.maximum(tmp58, tmp17) tmp60 = libdevice.pow(tmp59, tmp19) tmp61 = tmp21 - tmp57 tmp62 = triton_helpers.maximum(tmp61, tmp17) tmp63 = libdevice.pow(tmp62, tmp19) tmp64 = tmp60 + tmp63 tmp65 = tmp26 - tmp57 tmp66 = triton_helpers.maximum(tmp65, tmp17) tmp67 = libdevice.pow(tmp66, tmp19) tmp68 = tmp64 + tmp67 tmp69 = tmp31 - tmp57 tmp70 = triton_helpers.maximum(tmp69, tmp17) tmp71 = libdevice.pow(tmp70, tmp19) tmp72 = tmp68 + tmp71 tmp73 = tmp72 - tmp10 tmp74 = tmp73 * tmp52 tmp75 = tmp74 >= tmp17 tmp76 = tl.where(tmp75, tmp57, tmp55) tmp77 = tmp56 * tmp1 tmp78 = tmp76 + tmp77 tmp79 = tmp2 - tmp78 tmp80 = triton_helpers.maximum(tmp79, tmp17) tmp81 = libdevice.pow(tmp80, tmp19) tmp82 = tmp21 - tmp78 tmp83 = triton_helpers.maximum(tmp82, tmp17) tmp84 = libdevice.pow(tmp83, tmp19) tmp85 = tmp81 + tmp84 tmp86 = tmp26 - tmp78 tmp87 = triton_helpers.maximum(tmp86, tmp17) 
tmp88 = libdevice.pow(tmp87, tmp19) tmp89 = tmp85 + tmp88 tmp90 = tmp31 - tmp78 tmp91 = triton_helpers.maximum(tmp90, tmp17) tmp92 = libdevice.pow(tmp91, tmp19) tmp93 = tmp89 + tmp92 tmp94 = tmp93 - tmp10 tmp95 = tmp94 * tmp52 tmp96 = tmp95 >= tmp17 tmp97 = tl.where(tmp96, tmp78, tmp76) tmp98 = tmp77 * tmp1 tmp99 = tmp97 + tmp98 tmp100 = tmp2 - tmp99 tmp101 = triton_helpers.maximum(tmp100, tmp17) tmp102 = libdevice.pow(tmp101, tmp19) tmp103 = tmp21 - tmp99 tmp104 = triton_helpers.maximum(tmp103, tmp17) tmp105 = libdevice.pow(tmp104, tmp19) tmp106 = tmp102 + tmp105 tmp107 = tmp26 - tmp99 tmp108 = triton_helpers.maximum(tmp107, tmp17) tmp109 = libdevice.pow(tmp108, tmp19) tmp110 = tmp106 + tmp109 tmp111 = tmp31 - tmp99 tmp112 = triton_helpers.maximum(tmp111, tmp17) tmp113 = libdevice.pow(tmp112, tmp19) tmp114 = tmp110 + tmp113 tmp115 = tmp114 - tmp10 tmp116 = tmp115 * tmp52 tmp117 = tmp116 >= tmp17 tmp118 = tl.where(tmp117, tmp99, tmp97) tmp119 = tmp98 * tmp1 tmp120 = tmp118 + tmp119 tmp121 = tmp2 - tmp120 tmp122 = triton_helpers.maximum(tmp121, tmp17) tmp123 = libdevice.pow(tmp122, tmp19) tmp124 = tmp21 - tmp120 tmp125 = triton_helpers.maximum(tmp124, tmp17) tmp126 = libdevice.pow(tmp125, tmp19) tmp127 = tmp123 + tmp126 tmp128 = tmp26 - tmp120 tmp129 = triton_helpers.maximum(tmp128, tmp17) tmp130 = libdevice.pow(tmp129, tmp19) tmp131 = tmp127 + tmp130 tmp132 = tmp31 - tmp120 tmp133 = triton_helpers.maximum(tmp132, tmp17) tmp134 = libdevice.pow(tmp133, tmp19) tmp135 = tmp131 + tmp134 tmp136 = tmp135 - tmp10 tmp137 = tmp136 * tmp52 tmp138 = tmp137 >= tmp17 tmp139 = tl.where(tmp138, tmp120, tmp118) tmp140 = tmp119 * tmp1 tmp141 = tmp139 + tmp140 tmp142 = tmp2 - tmp141 tmp143 = triton_helpers.maximum(tmp142, tmp17) tmp144 = libdevice.pow(tmp143, tmp19) tmp145 = tmp21 - tmp141 tmp146 = triton_helpers.maximum(tmp145, tmp17) tmp147 = libdevice.pow(tmp146, tmp19) tmp148 = tmp144 + tmp147 tmp149 = tmp26 - tmp141 tmp150 = triton_helpers.maximum(tmp149, tmp17) tmp151 = libdevice.pow(tmp150, tmp19) tmp152 = tmp148 + tmp151 tmp153 = tmp31 - tmp141 tmp154 = triton_helpers.maximum(tmp153, tmp17) tmp155 = libdevice.pow(tmp154, tmp19) tmp156 = tmp152 + tmp155 tmp157 = tmp156 - tmp10 tmp158 = tmp157 * tmp52 tmp159 = tmp158 >= tmp17 tmp160 = tl.where(tmp159, tmp141, tmp139) tmp161 = tmp140 * tmp1 tmp162 = tmp160 + tmp161 tmp163 = tmp2 - tmp162 tmp164 = triton_helpers.maximum(tmp163, tmp17) tmp165 = libdevice.pow(tmp164, tmp19) tmp166 = tmp21 - tmp162 tmp167 = triton_helpers.maximum(tmp166, tmp17) tmp168 = libdevice.pow(tmp167, tmp19) tmp169 = tmp165 + tmp168 tmp170 = tmp26 - tmp162 tmp171 = triton_helpers.maximum(tmp170, tmp17) tmp172 = libdevice.pow(tmp171, tmp19) tmp173 = tmp169 + tmp172 tmp174 = tmp31 - tmp162 tmp175 = triton_helpers.maximum(tmp174, tmp17) tmp176 = libdevice.pow(tmp175, tmp19) tmp177 = tmp173 + tmp176 tmp178 = tmp177 - tmp10 tmp179 = tmp178 * tmp52 tmp180 = tmp179 >= tmp17 tmp181 = tl.where(tmp180, tmp162, tmp160) tmp182 = tmp161 * tmp1 tmp183 = tmp181 + tmp182 tmp184 = tmp2 - tmp183 tmp185 = triton_helpers.maximum(tmp184, tmp17) tmp186 = libdevice.pow(tmp185, tmp19) tmp187 = tmp21 - tmp183 tmp188 = triton_helpers.maximum(tmp187, tmp17) tmp189 = libdevice.pow(tmp188, tmp19) tmp190 = tmp186 + tmp189 tmp191 = tmp26 - tmp183 tmp192 = triton_helpers.maximum(tmp191, tmp17) tmp193 = libdevice.pow(tmp192, tmp19) tmp194 = tmp190 + tmp193 tmp195 = tmp31 - tmp183 tmp196 = triton_helpers.maximum(tmp195, tmp17) tmp197 = libdevice.pow(tmp196, tmp19) tmp198 = tmp194 + tmp197 tmp199 = tmp198 - tmp10 
tmp200 = tmp199 * tmp52 tmp201 = tmp200 >= tmp17 tmp202 = tl.where(tmp201, tmp183, tmp181) tmp203 = tmp182 * tmp1 tmp204 = tmp202 + tmp203 tmp205 = tmp2 - tmp204 tmp206 = triton_helpers.maximum(tmp205, tmp17) tmp207 = libdevice.pow(tmp206, tmp19) tmp208 = tmp21 - tmp204 tmp209 = triton_helpers.maximum(tmp208, tmp17) tmp210 = libdevice.pow(tmp209, tmp19) tmp211 = tmp207 + tmp210 tmp212 = tmp26 - tmp204 tmp213 = triton_helpers.maximum(tmp212, tmp17) tmp214 = libdevice.pow(tmp213, tmp19) tmp215 = tmp211 + tmp214 tmp216 = tmp31 - tmp204 tmp217 = triton_helpers.maximum(tmp216, tmp17) tmp218 = libdevice.pow(tmp217, tmp19) tmp219 = tmp215 + tmp218 tmp220 = tmp219 - tmp10 tmp221 = tmp220 * tmp52 tmp222 = tmp221 >= tmp17 tmp223 = tl.where(tmp222, tmp204, tmp202) tmp224 = tmp203 * tmp1 tmp225 = tmp223 + tmp224 tmp226 = tmp2 - tmp225 tmp227 = triton_helpers.maximum(tmp226, tmp17) tmp228 = libdevice.pow(tmp227, tmp19) tmp229 = tmp21 - tmp225 tmp230 = triton_helpers.maximum(tmp229, tmp17) tmp231 = libdevice.pow(tmp230, tmp19) tmp232 = tmp228 + tmp231 tmp233 = tmp26 - tmp225 tmp234 = triton_helpers.maximum(tmp233, tmp17) tmp235 = libdevice.pow(tmp234, tmp19) tmp236 = tmp232 + tmp235 tmp237 = tmp31 - tmp225 tmp238 = triton_helpers.maximum(tmp237, tmp17) tmp239 = libdevice.pow(tmp238, tmp19) tmp240 = tmp236 + tmp239 tmp241 = tmp240 - tmp10 tmp242 = tmp241 * tmp52 tmp243 = tmp242 >= tmp17 tmp244 = tl.where(tmp243, tmp225, tmp223) tmp245 = tmp224 * tmp1 tmp246 = tmp244 + tmp245 tmp247 = tmp2 - tmp246 tmp248 = triton_helpers.maximum(tmp247, tmp17) tmp249 = libdevice.pow(tmp248, tmp19) tmp250 = tmp21 - tmp246 tmp251 = triton_helpers.maximum(tmp250, tmp17) tmp252 = libdevice.pow(tmp251, tmp19) tmp253 = tmp249 + tmp252 tmp254 = tmp26 - tmp246 tmp255 = triton_helpers.maximum(tmp254, tmp17) tmp256 = libdevice.pow(tmp255, tmp19) tmp257 = tmp253 + tmp256 tmp258 = tmp31 - tmp246 tmp259 = triton_helpers.maximum(tmp258, tmp17) tmp260 = libdevice.pow(tmp259, tmp19) tmp261 = tmp257 + tmp260 tmp262 = tmp261 - tmp10 tmp263 = tmp262 * tmp52 tmp264 = tmp263 >= tmp17 tmp265 = tl.where(tmp264, tmp246, tmp244) tmp266 = tmp245 * tmp1 tmp267 = tmp265 + tmp266 tmp268 = tmp2 - tmp267 tmp269 = triton_helpers.maximum(tmp268, tmp17) tmp270 = libdevice.pow(tmp269, tmp19) tmp271 = tmp21 - tmp267 tmp272 = triton_helpers.maximum(tmp271, tmp17) tmp273 = libdevice.pow(tmp272, tmp19) tmp274 = tmp270 + tmp273 tmp275 = tmp26 - tmp267 tmp276 = triton_helpers.maximum(tmp275, tmp17) tmp277 = libdevice.pow(tmp276, tmp19) tmp278 = tmp274 + tmp277 tmp279 = tmp31 - tmp267 tmp280 = triton_helpers.maximum(tmp279, tmp17) tmp281 = libdevice.pow(tmp280, tmp19) tmp282 = tmp278 + tmp281 tmp283 = tmp282 - tmp10 tmp284 = tmp283 * tmp52 tmp285 = tmp284 >= tmp17 tmp286 = tl.where(tmp285, tmp267, tmp265) tmp287 = tmp266 * tmp1 tmp288 = tmp286 + tmp287 tmp289 = tmp2 - tmp288 tmp290 = triton_helpers.maximum(tmp289, tmp17) tmp291 = libdevice.pow(tmp290, tmp19) tmp292 = tmp21 - tmp288 tmp293 = triton_helpers.maximum(tmp292, tmp17) tmp294 = libdevice.pow(tmp293, tmp19) tmp295 = tmp291 + tmp294 tmp296 = tmp26 - tmp288 tmp297 = triton_helpers.maximum(tmp296, tmp17) tmp298 = libdevice.pow(tmp297, tmp19) tmp299 = tmp295 + tmp298 tmp300 = tmp31 - tmp288 tmp301 = triton_helpers.maximum(tmp300, tmp17) tmp302 = libdevice.pow(tmp301, tmp19) tmp303 = tmp299 + tmp302 tmp304 = tmp303 - tmp10 tmp305 = tmp304 * tmp52 tmp306 = tmp305 >= tmp17 tmp307 = tl.where(tmp306, tmp288, tmp286) tmp308 = tmp287 * tmp1 tmp309 = tmp307 + tmp308 tmp310 = tmp2 - tmp309 tmp311 = 
triton_helpers.maximum(tmp310, tmp17) tmp312 = libdevice.pow(tmp311, tmp19) tmp313 = tmp21 - tmp309 tmp314 = triton_helpers.maximum(tmp313, tmp17) tmp315 = libdevice.pow(tmp314, tmp19) tmp316 = tmp312 + tmp315 tmp317 = tmp26 - tmp309 tmp318 = triton_helpers.maximum(tmp317, tmp17) tmp319 = libdevice.pow(tmp318, tmp19) tmp320 = tmp316 + tmp319 tmp321 = tmp31 - tmp309 tmp322 = triton_helpers.maximum(tmp321, tmp17) tmp323 = libdevice.pow(tmp322, tmp19) tmp324 = tmp320 + tmp323 tmp325 = tmp324 - tmp10 tmp326 = tmp325 * tmp52 tmp327 = tmp326 >= tmp17 tmp328 = tl.where(tmp327, tmp309, tmp307) tmp329 = tmp308 * tmp1 tmp330 = tmp328 + tmp329 tmp331 = tmp2 - tmp330 tmp332 = triton_helpers.maximum(tmp331, tmp17) tmp333 = libdevice.pow(tmp332, tmp19) tmp334 = tmp21 - tmp330 tmp335 = triton_helpers.maximum(tmp334, tmp17) tmp336 = libdevice.pow(tmp335, tmp19) tmp337 = tmp333 + tmp336 tmp338 = tmp26 - tmp330 tmp339 = triton_helpers.maximum(tmp338, tmp17) tmp340 = libdevice.pow(tmp339, tmp19) tmp341 = tmp337 + tmp340 tmp342 = tmp31 - tmp330 tmp343 = triton_helpers.maximum(tmp342, tmp17) tmp344 = libdevice.pow(tmp343, tmp19) tmp345 = tmp341 + tmp344 tmp346 = tmp345 - tmp10 tmp347 = tmp346 * tmp52 tmp348 = tmp347 >= tmp17 tmp349 = tl.where(tmp348, tmp330, tmp328) tmp350 = tmp329 * tmp1 tmp351 = tmp349 + tmp350 tmp352 = tmp2 - tmp351 tmp353 = triton_helpers.maximum(tmp352, tmp17) tmp354 = libdevice.pow(tmp353, tmp19) tmp355 = tmp21 - tmp351 tmp356 = triton_helpers.maximum(tmp355, tmp17) tmp357 = libdevice.pow(tmp356, tmp19) tmp358 = tmp354 + tmp357 tmp359 = tmp26 - tmp351 tmp360 = triton_helpers.maximum(tmp359, tmp17) tmp361 = libdevice.pow(tmp360, tmp19) tmp362 = tmp358 + tmp361 tmp363 = tmp31 - tmp351 tmp364 = triton_helpers.maximum(tmp363, tmp17) tmp365 = libdevice.pow(tmp364, tmp19) tmp366 = tmp362 + tmp365 tmp367 = tmp366 - tmp10 tmp368 = tmp367 * tmp52 tmp369 = tmp368 >= tmp17 tmp370 = tl.where(tmp369, tmp351, tmp349) tmp371 = tmp350 * tmp1 tmp372 = tmp370 + tmp371 tmp373 = tmp2 - tmp372 tmp374 = triton_helpers.maximum(tmp373, tmp17) tmp375 = libdevice.pow(tmp374, tmp19) tmp376 = tmp21 - tmp372 tmp377 = triton_helpers.maximum(tmp376, tmp17) tmp378 = libdevice.pow(tmp377, tmp19) tmp379 = tmp375 + tmp378 tmp380 = tmp26 - tmp372 tmp381 = triton_helpers.maximum(tmp380, tmp17) tmp382 = libdevice.pow(tmp381, tmp19) tmp383 = tmp379 + tmp382 tmp384 = tmp31 - tmp372 tmp385 = triton_helpers.maximum(tmp384, tmp17) tmp386 = libdevice.pow(tmp385, tmp19) tmp387 = tmp383 + tmp386 tmp388 = tmp387 - tmp10 tmp389 = tmp388 * tmp52 tmp390 = tmp389 >= tmp17 tmp391 = tl.where(tmp390, tmp372, tmp370) tmp392 = tmp371 * tmp1 tmp393 = tmp391 + tmp392 tmp394 = tmp2 - tmp393 tmp395 = triton_helpers.maximum(tmp394, tmp17) tmp396 = libdevice.pow(tmp395, tmp19) tmp397 = tmp21 - tmp393 tmp398 = triton_helpers.maximum(tmp397, tmp17) tmp399 = libdevice.pow(tmp398, tmp19) tmp400 = tmp396 + tmp399 tmp401 = tmp26 - tmp393 tmp402 = triton_helpers.maximum(tmp401, tmp17) tmp403 = libdevice.pow(tmp402, tmp19) tmp404 = tmp400 + tmp403 tmp405 = tmp31 - tmp393 tmp406 = triton_helpers.maximum(tmp405, tmp17) tmp407 = libdevice.pow(tmp406, tmp19) tmp408 = tmp404 + tmp407 tmp409 = tmp408 - tmp10 tmp410 = tmp409 * tmp52 tmp411 = tmp410 >= tmp17 tmp412 = tl.where(tmp411, tmp393, tmp391) tmp413 = tmp392 * tmp1 tmp414 = tmp412 + tmp413 tmp415 = tmp2 - tmp414 tmp416 = triton_helpers.maximum(tmp415, tmp17) tmp417 = libdevice.pow(tmp416, tmp19) tmp418 = tmp21 - tmp414 tmp419 = triton_helpers.maximum(tmp418, tmp17) tmp420 = libdevice.pow(tmp419, tmp19) 
tmp421 = tmp417 + tmp420 tmp422 = tmp26 - tmp414 tmp423 = triton_helpers.maximum(tmp422, tmp17) tmp424 = libdevice.pow(tmp423, tmp19) tmp425 = tmp421 + tmp424 tmp426 = tmp31 - tmp414 tmp427 = triton_helpers.maximum(tmp426, tmp17) tmp428 = libdevice.pow(tmp427, tmp19) tmp429 = tmp425 + tmp428 tmp430 = tmp429 - tmp10 tmp431 = tmp430 * tmp52 tmp432 = tmp431 >= tmp17 tmp433 = tl.where(tmp432, tmp414, tmp412) tmp434 = tmp413 * tmp1 tmp435 = tmp433 + tmp434 tmp436 = tmp2 - tmp435 tmp437 = triton_helpers.maximum(tmp436, tmp17) tmp438 = libdevice.pow(tmp437, tmp19) tmp439 = tmp21 - tmp435 tmp440 = triton_helpers.maximum(tmp439, tmp17) tmp441 = libdevice.pow(tmp440, tmp19) tmp442 = tmp438 + tmp441 tmp443 = tmp26 - tmp435 tmp444 = triton_helpers.maximum(tmp443, tmp17) tmp445 = libdevice.pow(tmp444, tmp19) tmp446 = tmp442 + tmp445 tmp447 = tmp31 - tmp435 tmp448 = triton_helpers.maximum(tmp447, tmp17) tmp449 = libdevice.pow(tmp448, tmp19) tmp450 = tmp446 + tmp449 tmp451 = tmp450 - tmp10 tmp452 = tmp451 * tmp52 tmp453 = tmp452 >= tmp17 tmp454 = tl.where(tmp453, tmp435, tmp433) tmp455 = tmp434 * tmp1 tmp456 = tmp454 + tmp455 tmp457 = tmp2 - tmp456 tmp458 = triton_helpers.maximum(tmp457, tmp17) tmp459 = libdevice.pow(tmp458, tmp19) tmp460 = tmp21 - tmp456 tmp461 = triton_helpers.maximum(tmp460, tmp17) tmp462 = libdevice.pow(tmp461, tmp19) tmp463 = tmp459 + tmp462 tmp464 = tmp26 - tmp456 tmp465 = triton_helpers.maximum(tmp464, tmp17) tmp466 = libdevice.pow(tmp465, tmp19) tmp467 = tmp463 + tmp466 tmp468 = tmp31 - tmp456 tmp469 = triton_helpers.maximum(tmp468, tmp17) tmp470 = libdevice.pow(tmp469, tmp19) tmp471 = tmp467 + tmp470 tmp472 = tmp471 - tmp10 tmp473 = tmp472 * tmp52 tmp474 = tmp473 >= tmp17 tmp475 = tl.where(tmp474, tmp456, tmp454) tmp476 = tmp455 * tmp1 tmp477 = tmp475 + tmp476 tmp478 = tmp2 - tmp477 tmp479 = triton_helpers.maximum(tmp478, tmp17) tmp480 = libdevice.pow(tmp479, tmp19) tmp481 = tmp21 - tmp477 tmp482 = triton_helpers.maximum(tmp481, tmp17) tmp483 = libdevice.pow(tmp482, tmp19) tmp484 = tmp480 + tmp483 tmp485 = tmp26 - tmp477 tmp486 = triton_helpers.maximum(tmp485, tmp17) tmp487 = libdevice.pow(tmp486, tmp19) tmp488 = tmp484 + tmp487 tmp489 = tmp31 - tmp477 tmp490 = triton_helpers.maximum(tmp489, tmp17) tmp491 = libdevice.pow(tmp490, tmp19) tmp492 = tmp488 + tmp491 tmp493 = tmp492 - tmp10 tmp494 = tmp493 * tmp52 tmp495 = tmp494 >= tmp17 tmp496 = tl.where(tmp495, tmp477, tmp475) tmp497 = tmp476 * tmp1 tmp498 = tmp496 + tmp497 tmp499 = tmp2 - tmp498 tmp500 = triton_helpers.maximum(tmp499, tmp17) tmp501 = libdevice.pow(tmp500, tmp19) tmp502 = tmp21 - tmp498 tmp503 = triton_helpers.maximum(tmp502, tmp17) tmp504 = libdevice.pow(tmp503, tmp19) tmp505 = tmp501 + tmp504 tmp506 = tmp26 - tmp498 tmp507 = triton_helpers.maximum(tmp506, tmp17) tmp508 = libdevice.pow(tmp507, tmp19) tmp509 = tmp505 + tmp508 tmp510 = tmp31 - tmp498 tmp511 = triton_helpers.maximum(tmp510, tmp17) tmp512 = libdevice.pow(tmp511, tmp19) tmp513 = tmp509 + tmp512 tmp514 = tmp513 - tmp10 tmp515 = tmp514 * tmp52 tmp516 = tmp515 >= tmp17 tmp517 = tl.where(tmp516, tmp498, tmp496) tmp518 = tmp497 * tmp1 tmp519 = tmp517 + tmp518 tmp520 = tmp2 - tmp519 tmp521 = triton_helpers.maximum(tmp520, tmp17) tmp522 = libdevice.pow(tmp521, tmp19) tmp523 = tmp21 - tmp519 tmp524 = triton_helpers.maximum(tmp523, tmp17) tmp525 = libdevice.pow(tmp524, tmp19) tmp526 = tmp522 + tmp525 tmp527 = tmp26 - tmp519 tmp528 = triton_helpers.maximum(tmp527, tmp17) tmp529 = libdevice.pow(tmp528, tmp19) tmp530 = tmp526 + tmp529 tmp531 = tmp31 - tmp519 tmp532 
= triton_helpers.maximum(tmp531, tmp17) tmp533 = libdevice.pow(tmp532, tmp19) tmp534 = tmp530 + tmp533 tmp535 = tmp534 - tmp10 tmp536 = tmp535 * tmp52 tmp537 = tmp536 >= tmp17 tmp538 = tl.where(tmp537, tmp519, tmp517) tmp539 = tmp518 * tmp1 tmp540 = tmp538 + tmp539 tmp541 = tmp2 - tmp540 tmp542 = triton_helpers.maximum(tmp541, tmp17) tmp543 = libdevice.pow(tmp542, tmp19) tmp544 = tmp21 - tmp540 tmp545 = triton_helpers.maximum(tmp544, tmp17) tmp546 = libdevice.pow(tmp545, tmp19) tmp547 = tmp543 + tmp546 tmp548 = tmp26 - tmp540 tmp549 = triton_helpers.maximum(tmp548, tmp17) tmp550 = libdevice.pow(tmp549, tmp19) tmp551 = tmp547 + tmp550 tmp552 = tmp31 - tmp540 tmp553 = triton_helpers.maximum(tmp552, tmp17) tmp554 = libdevice.pow(tmp553, tmp19) tmp555 = tmp551 + tmp554 tmp556 = tmp555 - tmp10 tmp557 = tmp556 * tmp52 tmp558 = tmp557 >= tmp17 tmp559 = tl.where(tmp558, tmp540, tmp538) tmp560 = tmp539 * tmp1 tmp561 = tmp559 + tmp560 tmp562 = tmp2 - tmp561 tmp563 = triton_helpers.maximum(tmp562, tmp17) tmp564 = libdevice.pow(tmp563, tmp19) tmp565 = tmp21 - tmp561 tmp566 = triton_helpers.maximum(tmp565, tmp17) tmp567 = libdevice.pow(tmp566, tmp19) tmp568 = tmp564 + tmp567 tmp569 = tmp26 - tmp561 tmp570 = triton_helpers.maximum(tmp569, tmp17) tmp571 = libdevice.pow(tmp570, tmp19) tmp572 = tmp568 + tmp571 tmp573 = tmp31 - tmp561 tmp574 = triton_helpers.maximum(tmp573, tmp17) tmp575 = libdevice.pow(tmp574, tmp19) tmp576 = tmp572 + tmp575 tmp577 = tmp576 - tmp10 tmp578 = tmp577 * tmp52 tmp579 = tmp578 >= tmp17 tmp580 = tl.where(tmp579, tmp561, tmp559) tmp581 = tmp560 * tmp1 tmp582 = tmp580 + tmp581 tmp583 = tmp2 - tmp582 tmp584 = triton_helpers.maximum(tmp583, tmp17) tmp585 = libdevice.pow(tmp584, tmp19) tmp586 = tmp21 - tmp582 tmp587 = triton_helpers.maximum(tmp586, tmp17) tmp588 = libdevice.pow(tmp587, tmp19) tmp589 = tmp585 + tmp588 tmp590 = tmp26 - tmp582 tmp591 = triton_helpers.maximum(tmp590, tmp17) tmp592 = libdevice.pow(tmp591, tmp19) tmp593 = tmp589 + tmp592 tmp594 = tmp31 - tmp582 tmp595 = triton_helpers.maximum(tmp594, tmp17) tmp596 = libdevice.pow(tmp595, tmp19) tmp597 = tmp593 + tmp596 tmp598 = tmp597 - tmp10 tmp599 = tmp598 * tmp52 tmp600 = tmp599 >= tmp17 tmp601 = tl.where(tmp600, tmp582, tmp580) tmp602 = tmp581 * tmp1 tmp603 = tmp601 + tmp602 tmp604 = tmp2 - tmp603 tmp605 = triton_helpers.maximum(tmp604, tmp17) tmp606 = libdevice.pow(tmp605, tmp19) tmp607 = tmp21 - tmp603 tmp608 = triton_helpers.maximum(tmp607, tmp17) tmp609 = libdevice.pow(tmp608, tmp19) tmp610 = tmp606 + tmp609 tmp611 = tmp26 - tmp603 tmp612 = triton_helpers.maximum(tmp611, tmp17) tmp613 = libdevice.pow(tmp612, tmp19) tmp614 = tmp610 + tmp613 tmp615 = tmp31 - tmp603 tmp616 = triton_helpers.maximum(tmp615, tmp17) tmp617 = libdevice.pow(tmp616, tmp19) tmp618 = tmp614 + tmp617 tl.store(out_ptr0 + x0, tmp50, xmask) tl.store(out_ptr25 + x0, tmp392, xmask) tl.store(in_out_ptr13 + x0, tmp601, xmask) tl.store(out_ptr31 + x0, tmp618, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_where_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, 
eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp11 + tmp22 tmp24 = tl.where(tmp10, tmp23, tmp11) tmp25 = tmp22 * tmp1 tmp26 = tmp24 + tmp25 tmp27 = tmp2 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp9) tmp29 = 2.0 tmp30 = libdevice.pow(tmp28, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + x0, xmask) tmp14 = tl.load(in_ptr2 + x0, xmask) tmp18 = tl.load(in_out_ptr0 + x0, xmask) tmp19 = tl.load(in_ptr3 + x0, xmask) tmp36 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last') tmp44 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp50 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp56 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp15 = tmp14 - tmp7 tmp16 = tmp15 * tmp10 tmp17 = tmp16 >= tmp12 tmp20 = 0.5 tmp21 = tmp19 * tmp20 tmp22 = tmp21 * tmp20 tmp23 = tmp22 * tmp20 tmp24 = tmp23 * tmp20 tmp25 = tmp24 * tmp20 tmp26 = tmp25 * tmp20 tmp27 = tmp26 * tmp20 tmp28 = tmp27 * tmp20 tmp29 = tmp28 * tmp20 tmp30 = tmp29 * tmp20 tmp31 = tmp18 + tmp30 tmp32 = tl.where(tmp17, tmp31, tmp18) tmp33 = tmp30 * tmp20 tmp34 = tmp32 + tmp33 tmp35 = tl.where(tmp13, tmp34, tmp32) tmp37 = tmp36 * tmp20 tmp38 = tmp33 * tmp20 tmp39 = tmp35 + tmp38 tmp40 = tmp37 - tmp39 tmp41 = triton_helpers.maximum(tmp40, tmp12) tmp42 = 2.0 tmp43 = libdevice.pow(tmp41, tmp42) tmp45 = tmp44 * tmp20 tmp46 = tmp45 - tmp39 tmp47 = triton_helpers.maximum(tmp46, tmp12) tmp48 = libdevice.pow(tmp47, tmp42) tmp49 = tmp43 + tmp48 tmp51 = tmp50 * tmp20 tmp52 = tmp51 - tmp39 tmp53 = triton_helpers.maximum(tmp52, tmp12) tmp54 = libdevice.pow(tmp53, tmp42) tmp55 = tmp49 + tmp54 tmp57 = tmp56 * tmp20 tmp58 = tmp57 - tmp39 tmp59 = triton_helpers.maximum(tmp58, tmp12) tmp60 = libdevice.pow(tmp59, tmp42) tmp61 = tmp55 + tmp60 tl.store(in_out_ptr0 + x0, tmp35, xmask) tl.store(out_ptr0 + x0, tmp61, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_sub_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, 
eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp11 + tmp24 tmp26 = tl.where(tmp10, tmp25, tmp11) tmp27 = tmp24 * tmp1 tmp28 = tmp26 + tmp27 tmp29 = tmp2 - tmp28 tmp30 = triton_helpers.maximum(tmp29, tmp9) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + x0, xmask) tmp19 = tl.load(in_ptr2 + x0, xmask) tmp23 = tl.load(in_out_ptr0 + x0, xmask) tmp24 = tl.load(in_ptr3 + x0, xmask) tmp43 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last') tmp50 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp56 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp62 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 2.0 tmp2 = libdevice.pow(tmp0, tmp1) tmp4 = libdevice.pow(tmp3, tmp1) tmp5 = tmp2 + tmp4 tmp7 = libdevice.pow(tmp6, tmp1) tmp8 = tmp5 + tmp7 tmp10 = libdevice.pow(tmp9, tmp1) tmp11 = tmp8 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp15 = tmp14 - tmp12 tmp16 = tmp13 * tmp15 tmp17 = 0.0 tmp18 = tmp16 >= tmp17 tmp20 = tmp19 - tmp12 tmp21 = tmp20 * tmp15 tmp22 = tmp21 >= tmp17 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp23 + tmp37 tmp39 = tl.where(tmp22, tmp38, tmp23) tmp40 = tmp37 * tmp25 tmp41 = tmp39 + tmp40 tmp42 = tl.where(tmp18, tmp41, tmp39) tmp44 = tmp43 * tmp25 tmp45 = tmp40 * tmp25 tmp46 = tmp42 + tmp45 tmp47 = tmp44 - tmp46 tmp48 = triton_helpers.maximum(tmp47, tmp17) tmp49 = libdevice.pow(tmp48, tmp1) tmp51 = tmp50 * tmp25 tmp52 = tmp51 - tmp46 tmp53 = triton_helpers.maximum(tmp52, tmp17) tmp54 = libdevice.pow(tmp53, tmp1) tmp55 = tmp49 + tmp54 tmp57 = tmp56 * tmp25 tmp58 = tmp57 - tmp46 tmp59 = triton_helpers.maximum(tmp58, tmp17) tmp60 = libdevice.pow(tmp59, tmp1) tmp61 = tmp55 + tmp60 tmp63 = tmp62 * tmp25 tmp64 = tmp63 - tmp46 tmp65 = triton_helpers.maximum(tmp64, tmp17) tmp66 = libdevice.pow(tmp65, tmp1) tmp67 = tmp61 + tmp66 tl.store(in_out_ptr0 + x0, tmp42, xmask) tl.store(out_ptr0 + x0, tmp67, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_where_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = 
tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp11 + tmp26 tmp28 = tl.where(tmp10, tmp27, tmp11) tmp29 = tmp26 * tmp1 tmp30 = tmp28 + tmp29 tmp31 = tmp2 - tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6(in_out_ptr0, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + x0, xmask) tmp23 = tl.load(in_ptr2 + x0, xmask) tmp27 = tl.load(in_out_ptr0 + x0, xmask) tmp28 = tl.load(in_ptr3 + x0, xmask) tmp49 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp62 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp68 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp24 = tmp23 - tmp17 tmp25 = tmp24 * tmp20 tmp26 = tmp25 >= tmp1 tmp29 = 0.5 tmp30 = tmp28 * tmp29 tmp31 = tmp30 * tmp29 tmp32 = tmp31 * tmp29 tmp33 = tmp32 * tmp29 tmp34 = tmp33 * tmp29 tmp35 = tmp34 * tmp29 tmp36 = tmp35 * tmp29 tmp37 = tmp36 * tmp29 tmp38 = tmp37 * tmp29 tmp39 = tmp38 * tmp29 tmp40 = tmp39 * tmp29 tmp41 = tmp40 * tmp29 tmp42 = tmp41 * tmp29 tmp43 = tmp42 * tmp29 tmp44 = tmp27 + tmp43 tmp45 = tl.where(tmp26, tmp44, tmp27) tmp46 = tmp43 * tmp29 tmp47 = tmp45 + tmp46 tmp48 = tl.where(tmp22, tmp47, tmp45) tmp50 = tmp49 * tmp29 tmp51 = tmp46 * tmp29 tmp52 = tmp48 + tmp51 tmp53 = tmp50 - tmp52 tmp54 = triton_helpers.maximum(tmp53, tmp1) tmp55 = libdevice.pow(tmp54, tmp3) tmp57 = tmp56 * tmp29 tmp58 = tmp57 - tmp52 tmp59 = triton_helpers.maximum(tmp58, tmp1) tmp60 = libdevice.pow(tmp59, tmp3) tmp61 = tmp55 + tmp60 tmp63 = tmp62 * tmp29 tmp64 = tmp63 - tmp52 tmp65 = triton_helpers.maximum(tmp64, tmp1) tmp66 = libdevice.pow(tmp65, tmp3) tmp67 = tmp61 + tmp66 tmp69 = tmp68 * tmp29 tmp70 = tmp69 - tmp52 tmp71 = triton_helpers.maximum(tmp70, tmp1) tmp72 = libdevice.pow(tmp71, tmp3) tmp73 = tmp67 + tmp72 tmp74 = tmp73 - tmp17 tmp75 = tmp74 * tmp20 tmp76 = tmp75 >= tmp1 tmp77 = tl.where(tmp76, tmp52, 
tmp48) tmp78 = tmp51 * tmp29 tmp79 = tmp77 + tmp78 tmp80 = tmp50 - tmp79 tmp81 = triton_helpers.maximum(tmp80, tmp1) tmp82 = libdevice.pow(tmp81, tmp3) tmp83 = tmp57 - tmp79 tmp84 = triton_helpers.maximum(tmp83, tmp1) tmp85 = libdevice.pow(tmp84, tmp3) tmp86 = tmp82 + tmp85 tmp87 = tmp63 - tmp79 tmp88 = triton_helpers.maximum(tmp87, tmp1) tmp89 = libdevice.pow(tmp88, tmp3) tmp90 = tmp86 + tmp89 tmp91 = tmp69 - tmp79 tmp92 = triton_helpers.maximum(tmp91, tmp1) tmp93 = libdevice.pow(tmp92, tmp3) tmp94 = tmp90 + tmp93 tmp95 = tmp94 - tmp17 tmp96 = tmp95 * tmp20 tmp97 = tmp96 >= tmp1 tmp98 = tl.where(tmp97, tmp79, tmp77) tmp99 = tmp78 * tmp29 tmp100 = tmp98 + tmp99 tmp101 = tmp50 - tmp100 tmp102 = triton_helpers.maximum(tmp101, tmp1) tmp103 = libdevice.pow(tmp102, tmp3) tmp104 = tmp57 - tmp100 tmp105 = triton_helpers.maximum(tmp104, tmp1) tmp106 = libdevice.pow(tmp105, tmp3) tmp107 = tmp103 + tmp106 tmp108 = tmp63 - tmp100 tmp109 = triton_helpers.maximum(tmp108, tmp1) tmp110 = libdevice.pow(tmp109, tmp3) tmp111 = tmp107 + tmp110 tmp112 = tmp69 - tmp100 tmp113 = triton_helpers.maximum(tmp112, tmp1) tmp114 = libdevice.pow(tmp113, tmp3) tmp115 = tmp111 + tmp114 tmp116 = tmp115 - tmp17 tmp117 = tmp116 * tmp20 tmp118 = tmp117 >= tmp1 tmp119 = tl.where(tmp118, tmp100, tmp98) tmp120 = tmp99 * tmp29 tmp121 = tmp119 + tmp120 tmp122 = tmp50 - tmp121 tmp123 = triton_helpers.maximum(tmp122, tmp1) tmp124 = libdevice.pow(tmp123, tmp3) tmp125 = tmp57 - tmp121 tmp126 = triton_helpers.maximum(tmp125, tmp1) tmp127 = libdevice.pow(tmp126, tmp3) tmp128 = tmp124 + tmp127 tmp129 = tmp63 - tmp121 tmp130 = triton_helpers.maximum(tmp129, tmp1) tmp131 = libdevice.pow(tmp130, tmp3) tmp132 = tmp128 + tmp131 tmp133 = tmp69 - tmp121 tmp134 = triton_helpers.maximum(tmp133, tmp1) tmp135 = libdevice.pow(tmp134, tmp3) tmp136 = tmp132 + tmp135 tmp137 = tmp136 - tmp17 tmp138 = tmp137 * tmp20 tmp139 = tmp138 >= tmp1 tmp140 = tl.where(tmp139, tmp121, tmp119) tl.store(in_out_ptr3 + x0, tmp140, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp3 + tmp24 tmp26 = tmp2 - tmp25 tmp27 = 0.0 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = 2.0 tmp30 = libdevice.pow(tmp28, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_div_where_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + 
(3 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + x0, xmask) tmp14 = tl.load(in_out_ptr0 + x0, xmask) tmp15 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = tmp17 * tmp16 tmp19 = tmp18 * tmp16 tmp20 = tmp19 * tmp16 tmp21 = tmp20 * tmp16 tmp22 = tmp21 * tmp16 tmp23 = tmp22 * tmp16 tmp24 = tmp23 * tmp16 tmp25 = tmp24 * tmp16 tmp26 = tmp25 * tmp16 tmp27 = tmp26 * tmp16 tmp28 = tmp27 * tmp16 tmp29 = tmp28 * tmp16 tmp30 = tmp29 * tmp16 tmp31 = tmp30 * tmp16 tmp32 = tmp31 * tmp16 tmp33 = tmp32 * tmp16 tmp34 = tmp33 * tmp16 tmp35 = tmp34 * tmp16 tmp36 = tmp35 * tmp16 tmp37 = tmp14 + tmp36 tmp38 = tl.where(tmp13, tmp37, tmp14) tl.store(in_out_ptr0 + x0, tmp38, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp3 + tmp25 tmp27 = tmp2 - tmp26 tmp28 = 0.0 tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = 2.0 tmp31 = libdevice.pow(tmp29, tmp30) tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_add_div_where_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + x0, xmask) tmp14 = tl.load(in_out_ptr0 + x0, xmask) tmp15 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = tmp17 * tmp16 tmp19 = tmp18 * tmp16 tmp20 = tmp19 * tmp16 tmp21 = tmp20 * tmp16 tmp22 = tmp21 * tmp16 tmp23 = tmp22 * tmp16 tmp24 = tmp23 * tmp16 tmp25 = tmp24 * tmp16 tmp26 = tmp25 * tmp16 tmp27 = tmp26 * tmp16 tmp28 = tmp27 * tmp16 tmp29 = tmp28 * tmp16 tmp30 = tmp29 * tmp16 tmp31 = tmp30 * tmp16 tmp32 = tmp31 * tmp16 tmp33 = tmp32 * tmp16 tmp34 = tmp33 * tmp16 tmp35 = tmp34 * tmp16 tmp36 = tmp35 * tmp16 tmp37 = tmp36 * tmp16 tmp38 = tmp14 + tmp37 tmp39 = tl.where(tmp13, tmp38, tmp14) tl.store(in_out_ptr0 + x0, tmp39, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_sub_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp3 + tmp26 tmp28 = tmp2 - tmp27 tmp29 = 0.0 tmp30 = triton_helpers.maximum(tmp28, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_div_where_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + x0, xmask) tmp19 = tl.load(in_out_ptr0 + x0, xmask) tmp20 = tl.load(in_ptr2 + x0, xmask) tmp1 = 2.0 tmp2 = libdevice.pow(tmp0, tmp1) tmp4 = libdevice.pow(tmp3, tmp1) tmp5 = tmp2 + tmp4 tmp7 = libdevice.pow(tmp6, tmp1) tmp8 = tmp5 + tmp7 tmp10 = libdevice.pow(tmp9, tmp1) tmp11 = tmp8 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp15 = tmp14 - tmp12 tmp16 = tmp13 * tmp15 tmp17 = 0.0 tmp18 = tmp16 >= tmp17 tmp21 = 0.5 tmp22 = tmp20 * tmp21 tmp23 = tmp22 * tmp21 tmp24 = tmp23 * tmp21 tmp25 = tmp24 * tmp21 tmp26 = tmp25 * tmp21 tmp27 = tmp26 * tmp21 tmp28 = tmp27 * tmp21 tmp29 = tmp28 * tmp21 tmp30 = tmp29 * tmp21 tmp31 = tmp30 * tmp21 tmp32 = tmp31 * tmp21 tmp33 = tmp32 * tmp21 tmp34 = tmp33 * tmp21 tmp35 = tmp34 * tmp21 tmp36 = tmp35 * tmp21 tmp37 = tmp36 * tmp21 tmp38 = tmp37 * tmp21 tmp39 = tmp38 * tmp21 tmp40 = tmp39 * tmp21 tmp41 = tmp40 * tmp21 tmp42 = tmp41 * tmp21 tmp43 = tmp42 * tmp21 tmp44 = tmp19 + tmp43 tmp45 = tl.where(tmp18, tmp44, tmp19) tl.store(in_out_ptr0 + x0, tmp45, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_sub_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp3 + tmp27 tmp29 = tmp2 - tmp28 tmp30 = 0.0 tmp31 = triton_helpers.maximum(tmp29, tmp30) tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def 
triton_poi_fused_add_div_where_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + x0, xmask) tmp19 = tl.load(in_out_ptr0 + x0, xmask) tmp20 = tl.load(in_ptr2 + x0, xmask) tmp1 = 2.0 tmp2 = libdevice.pow(tmp0, tmp1) tmp4 = libdevice.pow(tmp3, tmp1) tmp5 = tmp2 + tmp4 tmp7 = libdevice.pow(tmp6, tmp1) tmp8 = tmp5 + tmp7 tmp10 = libdevice.pow(tmp9, tmp1) tmp11 = tmp8 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp15 = tmp14 - tmp12 tmp16 = tmp13 * tmp15 tmp17 = 0.0 tmp18 = tmp16 >= tmp17 tmp21 = 0.5 tmp22 = tmp20 * tmp21 tmp23 = tmp22 * tmp21 tmp24 = tmp23 * tmp21 tmp25 = tmp24 * tmp21 tmp26 = tmp25 * tmp21 tmp27 = tmp26 * tmp21 tmp28 = tmp27 * tmp21 tmp29 = tmp28 * tmp21 tmp30 = tmp29 * tmp21 tmp31 = tmp30 * tmp21 tmp32 = tmp31 * tmp21 tmp33 = tmp32 * tmp21 tmp34 = tmp33 * tmp21 tmp35 = tmp34 * tmp21 tmp36 = tmp35 * tmp21 tmp37 = tmp36 * tmp21 tmp38 = tmp37 * tmp21 tmp39 = tmp38 * tmp21 tmp40 = tmp39 * tmp21 tmp41 = tmp40 * tmp21 tmp42 = tmp41 * tmp21 tmp43 = tmp42 * tmp21 tmp44 = tmp43 * tmp21 tmp45 = tmp19 + tmp44 tmp46 = tl.where(tmp18, tmp45, tmp19) tl.store(in_out_ptr0 + x0, tmp46, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp27 * tmp1 tmp29 = tmp3 + tmp28 tmp30 = tmp2 - tmp29 tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_div_where_16(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + x0, xmask) tmp23 = tl.load(in_out_ptr0 + x0, xmask) tmp24 = tl.load(in_ptr2 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 
= tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp37 * tmp25 tmp39 = tmp38 * tmp25 tmp40 = tmp39 * tmp25 tmp41 = tmp40 * tmp25 tmp42 = tmp41 * tmp25 tmp43 = tmp42 * tmp25 tmp44 = tmp43 * tmp25 tmp45 = tmp44 * tmp25 tmp46 = tmp45 * tmp25 tmp47 = tmp46 * tmp25 tmp48 = tmp47 * tmp25 tmp49 = tmp48 * tmp25 tmp50 = tmp23 + tmp49 tmp51 = tl.where(tmp22, tmp50, tmp23) tl.store(in_out_ptr0 + x0, tmp51, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp27 * tmp1 tmp29 = tmp28 * tmp1 tmp30 = tmp3 + tmp29 tmp31 = tmp2 - tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_add_div_where_18(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + x0, xmask) tmp23 = tl.load(in_out_ptr0 + x0, xmask) tmp24 = tl.load(in_ptr2 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp37 * tmp25 tmp39 = tmp38 * tmp25 tmp40 = tmp39 * tmp25 tmp41 = tmp40 * tmp25 tmp42 = tmp41 * tmp25 tmp43 = tmp42 * tmp25 tmp44 = tmp43 * tmp25 tmp45 = tmp44 * tmp25 tmp46 = tmp45 * tmp25 tmp47 = 
tmp46 * tmp25 tmp48 = tmp47 * tmp25 tmp49 = tmp48 * tmp25 tmp50 = tmp49 * tmp25 tmp51 = tmp23 + tmp50 tmp52 = tl.where(tmp22, tmp51, tmp23) tl.store(in_out_ptr0 + x0, tmp52, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp27 * tmp1 tmp29 = tmp28 * tmp1 tmp30 = tmp29 * tmp1 tmp31 = tmp3 + tmp30 tmp32 = tmp2 - tmp31 tl.store(out_ptr0 + x2, tmp32, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20(in_out_ptr0, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr3, out_ptr5, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + x0, xmask) tmp23 = tl.load(in_out_ptr0 + x0, xmask) tmp24 = tl.load(in_ptr2 + x0, xmask) tmp56 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp61 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp67 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp73 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp37 * tmp25 tmp39 = tmp38 * tmp25 tmp40 = tmp39 * tmp25 tmp41 = tmp40 * tmp25 tmp42 = tmp41 * tmp25 tmp43 = tmp42 * tmp25 tmp44 = tmp43 * tmp25 tmp45 = tmp44 * tmp25 tmp46 = tmp45 * tmp25 tmp47 = tmp46 * tmp25 tmp48 = tmp47 * tmp25 tmp49 = tmp48 * tmp25 tmp50 = tmp49 * tmp25 tmp51 = tmp50 * tmp25 tmp52 = tmp23 + tmp51 tmp53 = tl.where(tmp22, tmp52, tmp23) tmp54 = tmp51 * tmp25 tmp55 = tmp53 + tmp54 tmp57 = tmp56 * tmp25 tmp58 = tmp57 - tmp55 tmp59 = triton_helpers.maximum(tmp58, 
tmp1) tmp60 = libdevice.pow(tmp59, tmp3) tmp62 = tmp61 * tmp25 tmp63 = tmp62 - tmp55 tmp64 = triton_helpers.maximum(tmp63, tmp1) tmp65 = libdevice.pow(tmp64, tmp3) tmp66 = tmp60 + tmp65 tmp68 = tmp67 * tmp25 tmp69 = tmp68 - tmp55 tmp70 = triton_helpers.maximum(tmp69, tmp1) tmp71 = libdevice.pow(tmp70, tmp3) tmp72 = tmp66 + tmp71 tmp74 = tmp73 * tmp25 tmp75 = tmp74 - tmp55 tmp76 = triton_helpers.maximum(tmp75, tmp1) tmp77 = libdevice.pow(tmp76, tmp3) tmp78 = tmp72 + tmp77 tmp79 = tmp78 - tmp17 tmp80 = tmp79 * tmp20 tmp81 = tmp80 >= tmp1 tmp82 = tl.where(tmp81, tmp55, tmp53) tmp83 = tmp54 * tmp25 tmp84 = tmp82 + tmp83 tmp85 = tmp57 - tmp84 tmp86 = triton_helpers.maximum(tmp85, tmp1) tmp87 = libdevice.pow(tmp86, tmp3) tmp88 = tmp62 - tmp84 tmp89 = triton_helpers.maximum(tmp88, tmp1) tmp90 = libdevice.pow(tmp89, tmp3) tmp91 = tmp87 + tmp90 tmp92 = tmp68 - tmp84 tmp93 = triton_helpers.maximum(tmp92, tmp1) tmp94 = libdevice.pow(tmp93, tmp3) tmp95 = tmp91 + tmp94 tmp96 = tmp74 - tmp84 tmp97 = triton_helpers.maximum(tmp96, tmp1) tmp98 = libdevice.pow(tmp97, tmp3) tmp99 = tmp95 + tmp98 tmp100 = tmp99 - tmp17 tmp101 = tmp100 * tmp20 tmp102 = tmp101 >= tmp1 tmp103 = tl.where(tmp102, tmp84, tmp82) tmp104 = tmp83 * tmp25 tmp105 = tmp103 + tmp104 tmp106 = tmp57 - tmp105 tmp107 = triton_helpers.maximum(tmp106, tmp1) tmp108 = libdevice.pow(tmp107, tmp3) tmp109 = tmp62 - tmp105 tmp110 = triton_helpers.maximum(tmp109, tmp1) tmp111 = libdevice.pow(tmp110, tmp3) tmp112 = tmp108 + tmp111 tmp113 = tmp68 - tmp105 tmp114 = triton_helpers.maximum(tmp113, tmp1) tmp115 = libdevice.pow(tmp114, tmp3) tmp116 = tmp112 + tmp115 tmp117 = tmp74 - tmp105 tmp118 = triton_helpers.maximum(tmp117, tmp1) tmp119 = libdevice.pow(tmp118, tmp3) tmp120 = tmp116 + tmp119 tmp121 = tmp120 - tmp17 tmp122 = tmp121 * tmp20 tmp123 = tmp122 >= tmp1 tmp124 = tl.where(tmp123, tmp105, tmp103) tmp125 = tmp104 * tmp25 tmp126 = tmp124 + tmp125 tmp127 = tmp57 - tmp126 tmp128 = triton_helpers.maximum(tmp127, tmp1) tmp129 = libdevice.pow(tmp128, tmp3) tmp130 = tmp62 - tmp126 tmp131 = triton_helpers.maximum(tmp130, tmp1) tmp132 = libdevice.pow(tmp131, tmp3) tmp133 = tmp129 + tmp132 tmp134 = tmp68 - tmp126 tmp135 = triton_helpers.maximum(tmp134, tmp1) tmp136 = libdevice.pow(tmp135, tmp3) tmp137 = tmp133 + tmp136 tmp138 = tmp74 - tmp126 tmp139 = triton_helpers.maximum(tmp138, tmp1) tmp140 = libdevice.pow(tmp139, tmp3) tmp141 = tmp137 + tmp140 tmp142 = tmp141 - tmp17 tmp143 = tmp142 * tmp20 tmp144 = tmp143 >= tmp1 tmp145 = tl.where(tmp144, tmp126, tmp124) tmp146 = tmp125 * tmp25 tmp147 = tmp145 + tmp146 tmp148 = tmp57 - tmp147 tmp149 = triton_helpers.maximum(tmp148, tmp1) tmp150 = libdevice.pow(tmp149, tmp3) tmp151 = tmp62 - tmp147 tmp152 = triton_helpers.maximum(tmp151, tmp1) tmp153 = libdevice.pow(tmp152, tmp3) tmp154 = tmp150 + tmp153 tmp155 = tmp68 - tmp147 tmp156 = triton_helpers.maximum(tmp155, tmp1) tmp157 = libdevice.pow(tmp156, tmp3) tmp158 = tmp154 + tmp157 tmp159 = tmp74 - tmp147 tmp160 = triton_helpers.maximum(tmp159, tmp1) tmp161 = libdevice.pow(tmp160, tmp3) tmp162 = tmp158 + tmp161 tl.store(out_ptr3 + x0, tmp104, xmask) tl.store(in_out_ptr2 + x0, tmp145, xmask) tl.store(out_ptr5 + x0, tmp162, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_where_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = 
tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp11 + tmp14 tmp16 = tl.where(tmp10, tmp15, tmp11) tmp17 = tmp14 * tmp1 tmp18 = tmp16 + tmp17 tmp19 = tmp2 - tmp18 tmp20 = triton_helpers.maximum(tmp19, tmp9) tmp21 = 2.0 tmp22 = libdevice.pow(tmp20, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused_div_22(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf43 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf57 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf58 = reinterpret_tensor(buf57, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf57 buf59 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0[grid(64)]( buf58, arg0_1, buf1, buf43, buf59, 64, XBLOCK=64, num_warps=1, num_stages=1) buf60 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_clamp_div_mul_pow_sub_where_1[grid(256)](arg0_1, buf59, buf1, buf58, buf43, buf60, 256, XBLOCK=128, num_warps=4, num_stages=1) buf61 = buf58 del buf58 buf62 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2[grid(64)](buf61, buf60, buf1, buf59, buf43, arg0_1, buf62, 64, XBLOCK=64, num_warps=1, num_stages=1) buf63 = buf60 del buf60 triton_poi_fused_add_clamp_div_mul_sub_where_3[grid(256)](arg0_1, buf62, buf1, buf61, buf43, buf63, 256, XBLOCK=256, num_warps=4, num_stages=1) buf64 = buf61 del buf61 buf65 = buf59 del buf59 triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4[grid(64)](buf64, buf63, buf1, buf62, buf43, arg0_1, buf65, 64, XBLOCK=64, num_warps=1, num_stages=1) buf66 = buf63 del buf63 triton_poi_fused_add_div_mul_sub_where_5[grid(256)](arg0_1, buf65, buf1, buf64, buf43, buf66, 256, XBLOCK=256, num_warps=4, num_stages=1) buf67 = buf64 del buf64 buf74 = buf62 del buf62 buf75 = reinterpret_tensor(buf74, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf74 triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6[grid(64)](buf67, buf75, buf66, buf1, buf65, buf43, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf76 = buf66 del buf66 triton_poi_fused_add_clamp_div_mul_pow_sub_7[grid(256)](arg0_1, buf75, buf43, buf76, 256, XBLOCK=256, num_warps=4, num_stages=1) buf77 = buf75 del buf75 triton_poi_fused_add_div_where_8[grid(64)](buf77, buf76, buf1, 
buf43, 64, XBLOCK=64, num_warps=1, num_stages=1) buf78 = buf76 del buf76 triton_poi_fused_add_clamp_div_mul_pow_sub_9[grid(256)](arg0_1, buf77, buf43, buf78, 256, XBLOCK=256, num_warps=4, num_stages=1) buf79 = buf77 del buf77 triton_poi_fused_add_div_where_10[grid(64)](buf79, buf78, buf1, buf43, 64, XBLOCK=64, num_warps=1, num_stages=1) buf80 = buf78 del buf78 triton_poi_fused_add_clamp_div_mul_sub_11[grid(256)](arg0_1, buf79, buf43, buf80, 256, XBLOCK=256, num_warps=4, num_stages=1) buf81 = buf79 del buf79 triton_poi_fused_add_div_where_12[grid(64)](buf81, buf80, buf1, buf43, 64, XBLOCK=64, num_warps=1, num_stages=1) buf82 = buf80 del buf80 triton_poi_fused_add_clamp_div_mul_sub_13[grid(256)](arg0_1, buf81, buf43, buf82, 256, XBLOCK=256, num_warps=4, num_stages=1) buf83 = buf81 del buf81 triton_poi_fused_add_div_where_14[grid(64)](buf83, buf82, buf1, buf43, 64, XBLOCK=64, num_warps=1, num_stages=1) buf84 = buf82 del buf82 triton_poi_fused_add_div_mul_sub_15[grid(256)](arg0_1, buf83, buf43, buf84, 256, XBLOCK=256, num_warps=4, num_stages=1) buf85 = buf83 del buf83 triton_poi_fused_add_div_where_16[grid(64)](buf85, buf84, buf1, buf43, 64, XBLOCK=64, num_warps=1, num_stages=1) buf86 = buf84 del buf84 triton_poi_fused_add_div_mul_sub_17[grid(256)](arg0_1, buf85, buf43, buf86, 256, XBLOCK=256, num_warps=4, num_stages=1) buf87 = buf85 del buf85 triton_poi_fused_add_div_where_18[grid(64)](buf87, buf86, buf1, buf43, 64, XBLOCK=64, num_warps=1, num_stages=1) buf88 = buf86 del buf86 triton_poi_fused_add_div_mul_sub_19[grid(256)](arg0_1, buf87, buf43, buf88, 256, XBLOCK=256, num_warps=4, num_stages=1) buf89 = buf87 del buf87 buf95 = buf67 del buf67 buf97 = buf65 del buf65 buf98 = reinterpret_tensor(buf97, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf97 buf99 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20[grid(64)](buf89 , buf98, buf88, buf1, buf43, arg0_1, buf95, buf99, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf43 del buf89 buf100 = buf88 del buf88 triton_poi_fused_add_clamp_div_mul_pow_sub_where_21[grid(256)](arg0_1, buf99, buf1, buf98, buf95, buf100, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del buf1 del buf95 del buf98 del buf99 buf101 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) triton_poi_fused_div_22[grid(256)](buf100, buf101, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf100 return buf101, def entmax_bisect(X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True): """alpha-entmax: normalizing sparse transform (a la softmax). Solves the optimization problem: max_p <x, p> - H_a(p) s.t. p >= 0, sum(p) == 1. where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1, using a bisection (root finding, binary search) algorithm. This function is differentiable with respect to both X and alpha. Parameters ---------- X : torch.Tensor The input tensor. alpha : float or torch.Tensor Tensor of alpha parameters (> 1) to use. If scalar or python float, the same value is used for all rows, otherwise, it must have shape (or be expandable to) alpha.shape[j] == (X.shape[j] if j != dim else 1) A value of alpha=2 corresponds to sparsemax, and alpha=1 would in theory recover softmax. For numeric reasons, this algorithm does not work with `alpha=1`: if you want softmax, we recommend `torch.nn.softmax`. dim : int The dimension along which to apply alpha-entmax. n_iter : int Number of bisection iterations. For float32, 24 iterations should suffice for machine precision. 
    ensure_sum_one : bool
        Whether to divide the result by its sum. If false, the result may
        sum to a value close to, but not exactly, 1, which can cause
        problems downstream.

    Returns
    -------
    P : torch tensor, same shape as X
        The projection result, such that P.sum(dim=dim) == 1 elementwise.
    """
    return EntmaxBisectFunction.apply(X, alpha, dim, n_iter, ensure_sum_one)


class EntmaxBisectFunction(Function):

    @classmethod
    def _gp(cls, x, alpha):
        return x ** (alpha - 1)

    @classmethod
    def _gp_inv(cls, y, alpha):
        return y ** (1 / (alpha - 1))

    @classmethod
    def _p(cls, X, alpha):
        return cls._gp_inv(torch.clamp(X, min=0), alpha)

    @classmethod
    def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
        if not isinstance(alpha, torch.Tensor):
            alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
        alpha_shape = list(X.shape)
        alpha_shape[dim] = 1
        alpha = alpha.expand(*alpha_shape)
        ctx.alpha = alpha
        ctx.dim = dim
        d = X.shape[dim]
        max_val, _ = X.max(dim=dim, keepdim=True)
        X = X * (alpha - 1)
        max_val = max_val * (alpha - 1)
        tau_lo = max_val - cls._gp(1, alpha)
        tau_hi = max_val - cls._gp(1 / d, alpha)
        f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
        dm = tau_hi - tau_lo
        for it in range(n_iter):
            dm /= 2
            tau_m = tau_lo + dm
            p_m = cls._p(X - tau_m, alpha)
            f_m = p_m.sum(dim) - 1
            mask = (f_m * f_lo >= 0).unsqueeze(dim)
            tau_lo = torch.where(mask, tau_m, tau_lo)
        if ensure_sum_one:
            p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
        ctx.save_for_backward(p_m)
        return p_m

    @classmethod
    def backward(cls, ctx, dY):
        Y, = ctx.saved_tensors
        gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
        dX = dY * gppr
        q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
        q = q.unsqueeze(ctx.dim)
        dX -= q * gppr
        d_alpha = None
        if ctx.needs_input_grad[1]:
            S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
            ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
            Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
            d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
            d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
            d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
        return dX, d_alpha, None, None, None


class EntmaxBisectNew(nn.Module):

    def __init__(self, alpha=1.5, dim=-1, n_iter=50):
        """alpha-entmax: normalizing sparse map (a la softmax) via bisection.

        Solves the optimization problem:

            max_p <x, p>    s.t.    p >= 0, sum(p) == 1.

        where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
        using a bisection (root finding, binary search) algorithm.

        Parameters
        ----------
        alpha : float or torch.Tensor
            Tensor of alpha parameters (> 1) to use. If scalar
            or python float, the same value is used for all rows, otherwise,
            it must have shape (or be expandable to)
            alpha.shape[j] == (X.shape[j] if j != dim else 1)
            A value of alpha=2 corresponds to sparsemax, and alpha=1 would in
            theory recover softmax. For numeric reasons, this algorithm does
            not work with `alpha=1`; if you want softmax, we recommend
            `torch.nn.functional.softmax`.

        dim : int
            The dimension along which to apply alpha-entmax.

        n_iter : int
            Number of bisection iterations. For float32, 24 iterations should
            suffice for machine precision.
        """
        self.dim = dim
        self.n_iter = n_iter
        self.alpha = alpha
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
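For readers tracing the unrolled buf57..buf101 kernel chain above back to the math: the following is a minimal eager-mode sketch of the same bisection, written for this note (the function name and the alpha=2.0 check are illustrative, not part of the repo). It assumes alpha > 1 and float inputs, and skips the autograd bookkeeping that EntmaxBisectFunction carries.

import torch

def entmax_bisect_sketch(X, alpha=1.5, dim=-1, n_iter=50):
    # Find tau such that sum(clamp(X*(alpha-1) - tau, 0) ** (1/(alpha-1))) == 1,
    # mirroring EntmaxBisectFunction.forward (assumes alpha > 1).
    X = X * (alpha - 1)
    max_val = X.max(dim=dim, keepdim=True).values
    d = X.shape[dim]
    tau_lo = max_val - 1.0                       # _gp(1, alpha) == 1
    tau_hi = max_val - (1.0 / d) ** (alpha - 1)  # _gp(1/d, alpha)
    f_lo = (X - tau_lo).clamp(min=0).pow(1 / (alpha - 1)).sum(dim) - 1
    dm = tau_hi - tau_lo
    for _ in range(n_iter):
        dm = dm / 2
        tau_m = tau_lo + dm
        p_m = (X - tau_m).clamp(min=0).pow(1 / (alpha - 1))
        f_m = p_m.sum(dim) - 1
        # Keep the endpoint whose sign agrees with f_lo: the root stays bracketed.
        tau_lo = torch.where((f_m * f_lo >= 0).unsqueeze(dim), tau_m, tau_lo)
    return p_m / p_m.sum(dim=dim, keepdim=True)

p = entmax_bisect_sketch(torch.randn(4, 4, 4, 4), alpha=2.0)
assert torch.allclose(p.sum(-1), torch.ones(4, 4, 4))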
cifkao/entmax
EntmaxBisect
false
15222
[ "MIT" ]
298
f18bab9318f9d2471a36545ee0b4c97be6d48a87
https://github.com/cifkao/entmax/tree/f18bab9318f9d2471a36545ee0b4c97be6d48a87
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ap/capr5gpndmsqwtzrelhk3pn347twep7l7ivmalrfu2fffhcrysj2.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 153760 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3844) % 10 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), 
xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/fi/cfi3ouwmd7rrqwynt6ueflr5ybkjpqjbbyuhfpbei3cbmpw5pjnr.py # Topologically Sorted Source Nodes: [max_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.relu] # Source node to ATen node mapping: # max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1 # x => relu # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 38440 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x3 = (xindex // 31) x2 = (xindex // 9610) x4 = xindex % 9610 x5 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (124*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (124*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (62 + (2*x0) + (124*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (63 + (2*x0) + (124*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = 
triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x4 + (9728*x2)), tmp15, xmask) tl.store(out_ptr1 + (x5), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sb/csbqlg2mwetmjkvat5jlclrm6y3xb7yufkfrquhfxsgey5p7obuf.py # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 67280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 841) % 20 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dh/cdhzqxap7eevuh4xmc324625y4c6jdpjpsr6rzvly5q6wak5t2ep.py # Topologically Sorted Source Nodes: [max_pool2d_1, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu] # Source node to ATen node mapping: # max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # x_1 => relu_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_3 = 
async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 15680 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = (xindex // 14) % 14 x4 = (xindex // 196) x3 = (xindex // 3920) x5 = xindex % 3920 x6 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (58*x1) + (841*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (58*x1) + (841*x4)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (29 + (2*x0) + (58*x1) + (841*x4)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (30 + (2*x0) + (58*x1) + (841*x4)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x5 + (3968*x3)), tmp15, xmask) tl.store(out_ptr1 + (x6), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zm/czm7iogogfrg5w6aodfpdncu3jdprnzzxpbl2zscjrooitqarozs.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 28800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 144) % 50 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/po/cpod27wt4afn55vszfkv4damymve62i2eg7c46lpzjhmukw4llzy.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_4 => _low_memory_max_pool2d_with_offsets_2, getitem_5 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_3, [4, 4], [4, 4], [0, 0], [1, 1], False), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 72 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (12 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (13 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (14 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (15 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (24 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (25 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (26 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (27 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (36 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (37 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (38 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (39 + (4*x0) + (48*x1)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tmp31 = tmp1 > tmp0 tmp32 = tl.full([1], 1, tl.int8) tmp33 = tl.full([1], 0, tl.int8) tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = tmp3 > tmp2 tmp36 = tl.full([1], 2, tl.int8) tmp37 = tl.where(tmp35, tmp36, tmp34) tmp38 = tmp5 > tmp4 tmp39 = tl.full([1], 3, tl.int8) tmp40 = tl.where(tmp38, tmp39, tmp37) tmp41 = tmp7 > tmp6 tmp42 = tl.full([1], 4, tl.int8) tmp43 = tl.where(tmp41, tmp42, tmp40) tmp44 = tmp9 > tmp8 tmp45 = tl.full([1], 5, tl.int8) tmp46 = tl.where(tmp44, tmp45, tmp43) tmp47 = tmp11 > tmp10 tmp48 = tl.full([1], 6, tl.int8) tmp49 = tl.where(tmp47, tmp48, tmp46) tmp50 = tmp13 > tmp12 tmp51 = tl.full([1], 7, tl.int8) tmp52 = tl.where(tmp50, tmp51, tmp49) tmp53 = tmp15 > tmp14 tmp54 = tl.full([1], 8, tl.int8) tmp55 = tl.where(tmp53, tmp54, tmp52) tmp56 = tmp17 > tmp16 tmp57 = tl.full([1], 9, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp19 > 
tmp18 tmp60 = tl.full([1], 10, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp21 > tmp20 tmp63 = tl.full([1], 11, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp23 > tmp22 tmp66 = tl.full([1], 12, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp25 > tmp24 tmp69 = tl.full([1], 13, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp27 > tmp26 tmp72 = tl.full([1], 14, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp29 > tmp28 tmp75 = tl.full([1], 15, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tl.store(out_ptr0 + (x2), tmp30, xmask) tl.store(out_ptr1 + (x2), tmp76, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uo/cuosedreo4jye4ps4slbmf7xmwplpdvqcvufz3wjca3nbc52rq3x.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_5 => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%getitem_4, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_4, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 72 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 9 x2 = (xindex // 18) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (18*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (9 + x0 + (18*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - 
tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + (x3), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (10, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (10, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (20, 10, 3, 3), (90, 9, 3, 1)) assert_size_stride(primals_5, (20, ), (1, )) assert_size_stride(primals_6, (50, 20, 3, 3), (180, 9, 3, 1)) assert_size_stride(primals_7, (50, ), (1, )) assert_size_stride(primals_8, (2, 50, 1, 1), (50, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 62, 62), (38440, 3844, 62, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 153760, grid=grid(153760), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 10, 31, 31), (9728, 961, 31, 1), torch.int8) buf3 = empty_strided_cuda((4, 10, 31, 31), (9610, 961, 31, 1), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.relu] triton_poi_fused_max_pool2d_with_indices_relu_1.run(buf1, buf2, buf3, 38440, grid=grid(38440), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 20, 29, 29), (16820, 841, 29, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_5, 67280, grid=grid(67280), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 20, 14, 14), (3968, 196, 14, 1), torch.int8) buf7 = empty_strided_cuda((4, 20, 14, 14), (3920, 196, 14, 1), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d_1, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu] triton_poi_fused_max_pool2d_with_indices_relu_3.run(buf5, buf6, buf7, 15680, grid=grid(15680), stream=stream0) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 50, 12, 12), (7200, 144, 12, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf9, primals_7, 28800, grid=grid(28800), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 2, 12, 12), (288, 144, 12, 1)) buf11 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.float32) buf12 = 
empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.int8) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf10, buf11, buf12, 72, grid=grid(72), stream=stream0) buf13 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax] triton_poi_fused__softmax_6.run(buf11, buf13, 72, grid=grid(72), stream=stream0) del buf11 return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf12, buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((10, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((20, 10, 3, 3), (90, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((50, 20, 3, 3), (180, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((2, 50, 1, 1), (50, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
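A pattern worth noting in the fused pooling kernels above (e.g. triton_poi_fused_max_pool2d_with_indices_relu_1 and triton_poi_fused_max_pool2d_with_indices_5): the argmax of each pooling window is built incrementally as an int8 slot code by chaining strict > comparisons, tl.where, and maximum updates. A hedged eager-mode paraphrase of that running-argmax idiom for a 2x2 window, written for this note with illustrative names (the kernels additionally fuse in the ReLU, which is omitted here):

import torch

def running_argmax_2x2(w00, w01, w10, w11):
    # Same update chain as tmp2..tmp16 in the kernels: compare each new
    # candidate against the running max and remember its int8 slot index.
    best = w00
    idx = torch.zeros_like(w00, dtype=torch.int8)
    for slot, cand in enumerate((w01, w10, w11), start=1):
        take = cand > best  # strict >, so earlier slots win ties, as in the kernel
        idx = torch.where(take, torch.tensor(slot, dtype=torch.int8), idx)
        best = torch.maximum(cand, best)
    return best, idx

x = torch.randn(4, 10, 62, 62)
vals, codes = running_argmax_2x2(x[..., 0::2, 0::2], x[..., 0::2, 1::2],
                                 x[..., 1::2, 0::2], x[..., 1::2, 1::2])
assert torch.equal(vals, torch.nn.functional.max_pool2d(x, 2))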
import torch from torch.nn import functional as F from torch import nn from torchvision import models as models import torch.onnx import torch.nn class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 10, kernel_size=3) self.conv2 = nn.Conv2d(10, 20, kernel_size=3) self.conv3 = nn.Conv2d(20, 50, kernel_size=3) self.conv4 = nn.Conv2d(50, 2, kernel_size=1, bias=False, padding=0, stride=1) self.max_pool2d = nn.MaxPool2d((4, 4)) self.softmax = nn.Softmax(dim=1) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2(x), 2)) x = self.conv3(x) x = self.conv4(x) x = self.max_pool2d(x) x = self.softmax(x) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
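The buffer shapes asserted in the compiled call() above follow directly from the valid (no-padding) 3x3 convolutions and the pooling strides. A quick shape trace one can run against the eager model (assumes the Net class defined above is in scope):

import torch
import torch.nn.functional as F

net = Net()
x = torch.rand(4, 3, 64, 64)
x1 = F.relu(F.max_pool2d(net.conv1(x), 2))   # 64 -> 62 -> 31
assert x1.shape == (4, 10, 31, 31)
x2 = F.relu(F.max_pool2d(net.conv2(x1), 2))  # 31 -> 29 -> 14 (floor)
assert x2.shape == (4, 20, 14, 14)
x3 = net.conv3(x2)                           # 14 -> 12
assert x3.shape == (4, 50, 12, 12)
x4 = net.conv4(x3)                           # 1x1 conv keeps 12x12
assert x4.shape == (4, 2, 12, 12)
out = net.softmax(net.max_pool2d(x4))        # 12 -> 3, softmax over channels
assert out.shape == (4, 2, 3, 3)
assert torch.allclose(out.sum(dim=1), torch.ones(4, 3, 3))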
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 153760 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 10 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 38440 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 31 x3 = xindex // 31 x2 = xindex // 9610 x4 = xindex % 9610 x5 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x3), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x4 + 9728 * x2), tmp15, xmask) tl.store(out_ptr1 + x5, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 67280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 841 % 20 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 15680 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = xindex // 14 % 14 x4 = xindex // 196 x3 = xindex // 3920 x5 = xindex % 3920 x6 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 58 * x1 + 841 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 58 * x1 + 841 * x4), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (29 + 2 * x0 + 58 * x1 + 841 * x4), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (30 + 2 * x0 + 58 * x1 + 841 * x4), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) 
tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x5 + 3968 * x3), tmp15, xmask) tl.store(out_ptr1 + x6, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 28800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 144 % 50 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 72 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x0 + 48 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0 + 48 * x1), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0 + 48 * x1), xmask, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0 + 48 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (12 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (13 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (14 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (15 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (24 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (25 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (26 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (27 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (36 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (37 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (38 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (39 + 4 * x0 + 48 * x1), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tmp31 = tmp1 > tmp0 tmp32 = tl.full([1], 1, tl.int8) tmp33 = tl.full([1], 0, tl.int8) tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = tmp3 > tmp2 tmp36 = tl.full([1], 2, tl.int8) tmp37 = tl.where(tmp35, tmp36, tmp34) tmp38 = tmp5 > tmp4 tmp39 = tl.full([1], 3, tl.int8) tmp40 = tl.where(tmp38, tmp39, tmp37) tmp41 = tmp7 
> tmp6 tmp42 = tl.full([1], 4, tl.int8) tmp43 = tl.where(tmp41, tmp42, tmp40) tmp44 = tmp9 > tmp8 tmp45 = tl.full([1], 5, tl.int8) tmp46 = tl.where(tmp44, tmp45, tmp43) tmp47 = tmp11 > tmp10 tmp48 = tl.full([1], 6, tl.int8) tmp49 = tl.where(tmp47, tmp48, tmp46) tmp50 = tmp13 > tmp12 tmp51 = tl.full([1], 7, tl.int8) tmp52 = tl.where(tmp50, tmp51, tmp49) tmp53 = tmp15 > tmp14 tmp54 = tl.full([1], 8, tl.int8) tmp55 = tl.where(tmp53, tmp54, tmp52) tmp56 = tmp17 > tmp16 tmp57 = tl.full([1], 9, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp19 > tmp18 tmp60 = tl.full([1], 10, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp21 > tmp20 tmp63 = tl.full([1], 11, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp23 > tmp22 tmp66 = tl.full([1], 12, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp25 > tmp24 tmp69 = tl.full([1], 13, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp27 > tmp26 tmp72 = tl.full([1], 14, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp29 > tmp28 tmp75 = tl.full([1], 15, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tl.store(out_ptr0 + x2, tmp30, xmask) tl.store(out_ptr1 + x2, tmp76, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 72 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 9 x2 = xindex // 18 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 18 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (9 + x0 + 18 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + x3, tmp11, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (10, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (20, 10, 3, 3), (90, 9, 3, 1)) assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (50, 20, 3, 3), (180, 9, 3, 1)) assert_size_stride(primals_7, (50,), (1,)) assert_size_stride(primals_8, (2, 50, 1, 1), (50, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 10, 62, 62), (38440, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(153760)](buf1, primals_2, 153760, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 10, 31, 31), (9728, 961, 31, 1), torch.int8) buf3 = empty_strided_cuda((4, 10, 31, 31), (9610, 961, 31, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_1[grid(38440)](buf1, buf2, buf3, 38440, XBLOCK=512, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 20, 29, 29), (16820, 841, 29, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(67280)](buf5, primals_5, 67280, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = 
empty_strided_cuda((4, 20, 14, 14), (3968, 196, 14, 1), torch.int8) buf7 = empty_strided_cuda((4, 20, 14, 14), (3920, 196, 14, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_3[grid(15680)](buf5, buf6, buf7, 15680, XBLOCK=256, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 50, 12, 12), (7200, 144, 12, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_4[grid(28800)](buf9, primals_7, 28800, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 2, 12, 12), (288, 144, 12, 1)) buf11 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.float32) buf12 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(72)](buf10, buf11, buf12, 72, XBLOCK=128, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((4, 2, 3, 3), (18, 9, 3, 1), torch.float32) triton_poi_fused__softmax_6[grid(72)](buf11, buf13, 72, XBLOCK=128, num_warps=4, num_stages=1) del buf11 return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf12, buf13) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = nn.Conv2d(3, 10, kernel_size=3) self.conv2 = nn.Conv2d(10, 20, kernel_size=3) self.conv3 = nn.Conv2d(20, 50, kernel_size=3) self.conv4 = nn.Conv2d(50, 2, kernel_size=1, bias=False, padding=0, stride=1) self.max_pool2d = nn.MaxPool2d((4, 4)) self.softmax = nn.Softmax(dim=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
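triton_poi_fused__softmax_6 above folds amax/sub/exp/sum/div into a single pass, i.e. the standard max-subtraction trick for numerically stable softmax over the two channels. A small, kernel-independent check of that identity (illustrative values, written for this note):

import torch

x = torch.randn(4, 2, 3, 3) * 50    # large logits to stress exp
m = x.amax(dim=1, keepdim=True)     # the kernel's per-position channel max
stable = torch.exp(x - m) / torch.exp(x - m).sum(dim=1, keepdim=True)
assert torch.allclose(stable, torch.softmax(x, dim=1))
assert torch.isfinite(stable).all()  # naive exp(x) can overflow float32 here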
dqawami/openvino_training_extensions
Net
false
15223
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
EquivariantLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/fk/cfkcunh3plyysuvib63zgkougyqv2ia22pa4qcifvxy3tij7w7nx.py # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # y_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex 
// 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf2, 16, grid=grid(16), stream=stream0) del primals_2 return (buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
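The fused kernel above applies the ReLU and, in the same pass, stores the boolean y <= 0 mask that aten.threshold_backward later uses to zero gradients. The relationship in plain PyTorch (a sketch for this note, with an all-ones stand-in for the upstream gradient):

import torch

x = torch.randn(4, 4)
y = torch.relu(x)
mask = y <= 0                        # what the kernel writes to the int1 buffer
g = torch.ones_like(y)               # pretend upstream gradient
grad_in = torch.where(mask, torch.zeros_like(g), g)
assert torch.equal(grad_in, (x > 0).to(g.dtype))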
import math import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.nn.functional as F from torch.nn.modules.batchnorm import _BatchNorm class MyBatchNorm1d(_BatchNorm): """Applies Batch Normalization over a 2d or 3d input that is seen as a mini-batch. .. math:: y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta The mean and standard-deviation are calculated per-dimension over the mini-batches and gamma and beta are learnable parameter vectors of size C (where C is the input size). During training, this layer keeps a running estimate of its computed mean and variance. The running sum is kept with a default momentum of 0.1. During evaluation, this running mean/variance is used for normalization. Because the BatchNorm is done over the `C` dimension, computing statistics on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm Args: num_features: num_features from an expected input of size `batch_size x num_features [x width]` eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Default: 0.1 affine: a boolean value that when set to ``True``, gives the layer learnable affine parameters. Default: ``True`` Shape: - Input: :math:`(N, C)` or :math:`(N, C, L)` - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input) """ def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, momentum_decay_step=None, momentum_decay=1): super(MyBatchNorm1d, self).__init__(num_features, eps, momentum, affine ) self.momentum_decay_step = momentum_decay_step self.momentum_decay = momentum_decay self.momentum_original = self.momentum def _check_input_dim(self, input): if input.dim() != 2 and input.dim() != 3: raise ValueError('expected 2D or 3D input (got {}D input)'. 
format(input.dim())) super(MyBatchNorm1d, self)._check_input_dim(input) def forward(self, input, epoch=None): if (epoch is not None and epoch >= 1 and self.momentum_decay_step is not None and self.momentum_decay_step > 0): self.momentum = self.momentum_original * self.momentum_decay ** ( epoch // self.momentum_decay_step) if self.momentum < 0.01: self.momentum = 0.01 return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, self.training, self.momentum, self.eps) class Swish(nn.Module): def __init__(self): super(Swish, self).__init__() def forward(self, x): return 1.78718727865 * (x * torch.sigmoid(x) - 0.20662096414) class EquivariantLayer(nn.Module): def __init__(self, num_in_channels, num_out_channels, activation='relu', normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1): super(EquivariantLayer, self).__init__() self.num_in_channels = num_in_channels self.num_out_channels = num_out_channels self.activation = activation self.normalization = normalization self.conv = nn.Conv1d(self.num_in_channels, self.num_out_channels, kernel_size=1, stride=1, padding=0) if 'batch' == self.normalization: self.norm = MyBatchNorm1d(self.num_out_channels, momentum= momentum, affine=True, momentum_decay_step= bn_momentum_decay_step, momentum_decay=bn_momentum_decay) elif 'instance' == self.normalization: self.norm = nn.InstanceNorm1d(self.num_out_channels, momentum= momentum, affine=True) if 'relu' == self.activation: self.act = nn.ReLU() elif 'elu' == self.activation: self.act = nn.ELU(alpha=1.0) elif 'swish' == self.activation: self.act = Swish() elif 'leakyrelu' == self.activation: self.act = nn.LeakyReLU(0.1) self.weight_init() def weight_init(self): for m in self.modules(): if isinstance(m, nn.Conv1d): n = m.kernel_size[0] * m.in_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) if m.bias is not None: m.bias.data.fill_(0) elif isinstance(m, MyBatchNorm1d) or isinstance(m, nn. InstanceNorm1d): m.weight.data.fill_(1) m.bias.data.zero_() def forward(self, x, epoch=None): y = self.conv(x) if self.normalization == 'batch': y = self.norm(y, epoch) elif self.normalization is not None: y = self.norm(y) if self.activation is not None: y = self.act(y) return y def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'num_in_channels': 4, 'num_out_channels': 4}]
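Because the layer's convolution has kernel_size=1, it acts pointwise along the length axis, which is what makes it equivariant to permutations of the points. A hedged check of that property (assumes the EquivariantLayer class defined above is in scope; shapes are illustrative):

import torch

layer = EquivariantLayer(4, 4)       # default: 1x1 Conv1d + ReLU, no norm
x = torch.rand(2, 4, 7)              # (batch, channels, points)
perm = torch.randperm(7)
# Permuting points then applying the layer matches applying then permuting.
assert torch.allclose(layer(x)[..., perm], layer(x[..., perm]), atol=1e-6)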
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.nn.functional as F from torch.nn.modules.batchnorm import _BatchNorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), buf2 class MyBatchNorm1d(_BatchNorm): """Applies Batch Normalization over a 2d or 3d input that is seen as a mini-batch. .. math:: y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta The mean and standard-deviation are calculated per-dimension over the mini-batches and gamma and beta are learnable parameter vectors of size C (where C is the input size). During training, this layer keeps a running estimate of its computed mean and variance. The running sum is kept with a default momentum of 0.1. During evaluation, this running mean/variance is used for normalization. Because the BatchNorm is done over the `C` dimension, computing statistics on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm Args: num_features: num_features from an expected input of size `batch_size x num_features [x width]` eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Default: 0.1 affine: a boolean value that when set to ``True``, gives the layer learnable affine parameters. 
Default: ``True`` Shape: - Input: :math:`(N, C)` or :math:`(N, C, L)` - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input) """ def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, momentum_decay_step=None, momentum_decay=1): super(MyBatchNorm1d, self).__init__(num_features, eps, momentum, affine ) self.momentum_decay_step = momentum_decay_step self.momentum_decay = momentum_decay self.momentum_original = self.momentum def _check_input_dim(self, input): if input.dim() != 2 and input.dim() != 3: raise ValueError('expected 2D or 3D input (got {}D input)'. format(input.dim())) super(MyBatchNorm1d, self)._check_input_dim(input) def forward(self, input, epoch=None): if (epoch is not None and epoch >= 1 and self.momentum_decay_step is not None and self.momentum_decay_step > 0): self.momentum = self.momentum_original * self.momentum_decay ** ( epoch // self.momentum_decay_step) if self.momentum < 0.01: self.momentum = 0.01 return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, self.training, self.momentum, self.eps) class Swish(nn.Module): def __init__(self): super(Swish, self).__init__() def forward(self, x): return 1.78718727865 * (x * torch.sigmoid(x) - 0.20662096414) class EquivariantLayerNew(nn.Module): def __init__(self, num_in_channels, num_out_channels, activation='relu', normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1): super(EquivariantLayerNew, self).__init__() self.num_in_channels = num_in_channels self.num_out_channels = num_out_channels self.activation = activation self.normalization = normalization self.conv = nn.Conv1d(self.num_in_channels, self.num_out_channels, kernel_size=1, stride=1, padding=0) if 'batch' == self.normalization: self.norm = MyBatchNorm1d(self.num_out_channels, momentum= momentum, affine=True, momentum_decay_step= bn_momentum_decay_step, momentum_decay=bn_momentum_decay) elif 'instance' == self.normalization: self.norm = nn.InstanceNorm1d(self.num_out_channels, momentum= momentum, affine=True) if 'relu' == self.activation: self.act = nn.ReLU() elif 'elu' == self.activation: self.act = nn.ELU(alpha=1.0) elif 'swish' == self.activation: self.act = Swish() elif 'leakyrelu' == self.activation: self.act = nn.LeakyReLU(0.1) self.weight_init() def weight_init(self): for m in self.modules(): if isinstance(m, nn.Conv1d): n = m.kernel_size[0] * m.in_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) if m.bias is not None: m.bias.data.fill_(0) elif isinstance(m, MyBatchNorm1d) or isinstance(m, nn. InstanceNorm1d): m.weight.data.fill_(1) m.bias.data.zero_() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
doudoulaile/RL-GAN-Net
EquivariantLayer
false
15,224
[ "MIT" ]
112
9c221223d1878bc24f0f39ad34928c1bb2974ae3
https://github.com/doudoulaile/RL-GAN-Net/tree/9c221223d1878bc24f0f39ad34928c1bb2974ae3
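Stripped of the normalization and compilation plumbing, the EquivariantLayer above is a shared kernel-size-1 Conv1d, which applies the same linear map at every point and therefore commutes with permutations along the point axis. A minimal, self-contained sketch of that property (the sizes here, 4 channels and 8 points, are illustrative and not taken from the entry):

import torch
import torch.nn as nn

# A kernel-size-1 Conv1d applies one linear map per position, so permuting
# the points permutes the outputs the same way -- the equivariance the
# layer's name refers to.
conv = nn.Conv1d(4, 4, kernel_size=1)
x = torch.rand(2, 4, 8)                     # (batch, channels, points)
perm = torch.randperm(8)
assert torch.allclose(conv(x)[:, :, perm], conv(x[:, :, perm]))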
FAdd
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7j/c7jqy52lkepba3ujsuxxuvgxf7a6czq2qj5avtu744fazfubj7kh.py # Topologically Sorted Source Nodes: [add, wrapped_float32, x], Original ATen: [aten.add, aten._to_copy] # Source node to ATen node mapping: # add => add # wrapped_float32 => full_default # x => add_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.10000000149011612), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %full_default), kwargs = {}) triton_poi_fused__to_copy_add_0 = async_compile.triton('triton_poi_fused__to_copy_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused__to_copy_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.10000000149011612 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, wrapped_float32, x], Original ATen: [aten.add, aten._to_copy] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_add_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn class FAdd(nn.Module): def __init__(self): super(FAdd, self).__init__() def forward(self, x, y): x = x + y + np.float32(0.1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.10000000149011612 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy_add_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class FAddNew(nn.Module): def __init__(self): super(FAddNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
dawnclaude/onnx2keras
FAdd
false
15,225
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
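The fused kernel in this entry hard-codes the constant 0.10000000149011612, which is simply np.float32(0.1) widened to double precision. A quick check of that, alongside the eager computation the compiled graph replaces:

import numpy as np
import torch

# np.float32(0.1) printed at full double precision is exactly the literal
# the generated Triton kernel bakes in.
assert float(np.float32(0.1)) == 0.10000000149011612

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
eager = x + y + np.float32(0.1)             # what FAdd.forward computes
assert torch.allclose(eager, x + y + 0.10000000149011612)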
Embedding_Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/4f/c4fhwriau4mr3duprgb45jz7sxqgne6hfefgxj7c4c2wich4h2mi.py # Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.relu, aten.view, aten.threshold_backward] # Source node to ATen node mapping: # embedding => relu, view_3 # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_15, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_view_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_relu_threshold_backward_view_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr1 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view] # Source node to ATen node mapping: # linear_1 => view_7 # Graph fragment: # %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {}) triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ok/cokmm6f22cnq63mmnyxjrvxaxrwihaszvey6c2d7q3ygazjafjig.py # Topologically Sorted Source Nodes: [out_z], Original ATen: [aten.div] # Source node to ATen node mapping: # out_z => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_8, %expand), kwargs = {}) triton_poi_fused_div_2 = async_compile.triton('triton_poi_fused_div_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.relu, aten.view, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_view_0.run(buf1, primals_2, buf2, buf6, 256, grid=grid(256), stream=stream0) del primals_2 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view] triton_poi_fused_view_1.run(buf1, buf3, 256, grid=grid(256), stream=stream0) buf4 = 
reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_z], Original ATen: [aten.div] triton_poi_fused_div_2.run(buf4, buf5, 256, grid=grid(256), stream=stream0) return (buf2, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3, buf4, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.nn.functional as F def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class Embedding_Net(nn.Module): def __init__(self, opt): super(Embedding_Net, self).__init__() self.fc1 = nn.Linear(opt.resSize, opt.embedSize) self.fc2 = nn.Linear(opt.embedSize, opt.outzSize) self.lrelu = nn.LeakyReLU(0.2, True) self.relu = nn.ReLU(True) self.apply(weights_init) def forward(self, features): embedding = self.relu(self.fc1(features)) out_z = F.normalize(self.fc2(embedding), dim=1) return embedding, out_z def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(resSize=4, embedSize=4, outzSize=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_view_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr1 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_view_0[grid(256)](buf1, primals_2, buf2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) 
triton_poi_fused_view_1[grid(256)](buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_2[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf2, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf3, buf4, primals_4, buf6 def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class Embedding_NetNew(nn.Module): def __init__(self, opt): super(Embedding_NetNew, self).__init__() self.fc1 = nn.Linear(opt.resSize, opt.embedSize) self.fc2 = nn.Linear(opt.embedSize, opt.outzSize) self.lrelu = nn.LeakyReLU(0.2, True) self.relu = nn.ReLU(True) self.apply(weights_init) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
Huihui-z/CE-GZSL
Embedding_Net
false
15,226
[ "MIT" ]
58
7bf5358ac4727ea1dc2dc9dec2f453b014500bd8
https://github.com/Huihui-z/CE-GZSL/tree/7bf5358ac4727ea1dc2dc9dec2f453b014500bd8
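The triton_poi_fused_div_2 kernel in this entry is the generated form of F.normalize(..., dim=1): an L2 norm over dim 1, clamped at 1e-12, followed by a divide. A small eager check of that equivalence:

import torch
import torch.nn.functional as F

# F.normalize divides by max(||t||_2, eps) along the chosen dim; eps
# defaults to 1e-12, matching the kernel's clamp before the division.
t = torch.rand(4, 4, 4, 4)
denom = t.pow(2).sum(dim=1, keepdim=True).sqrt().clamp_min(1e-12)
assert torch.allclose(F.normalize(t, dim=1), t / denom)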
GatedLinearUnit
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6v/c6vzcw3gyn5uqhyxbbwmpum2zzhvhs66tjq2oznzcap5zo7izpvb.py # Topologically Sorted Source Nodes: [sigmoid, x_1], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # sigmoid => sigmoid # x_1 => mul # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_3), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) 
tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, x_1], Original ATen: [aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) return (buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torchvision import models as models import torch.onnx import torch.nn class GatedLinearUnit(nn.Module): def __init__(self, input_size, output_size, dropout=0): super().__init__() self.dropout = nn.Dropout(dropout) self.w4 = nn.Linear(input_size, output_size) self.w5 = nn.Linear(input_size, output_size) self.act = nn.Sigmoid() def forward(self, x): x = self.dropout(x) x = self.act(self.w4(x)) * self.w5(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, buf1 class GatedLinearUnitNew(nn.Module): def __init__(self, input_size, output_size, dropout=0): super().__init__() self.dropout = nn.Dropout(dropout) self.w4 = nn.Linear(input_size, output_size) self.w5 = nn.Linear(input_size, output_size) self.act = nn.Sigmoid() def forward(self, input_0): primals_2 = self.w4.weight primals_3 = self.w4.bias primals_4 = self.w5.weight primals_5 = self.w5.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
dqawami/openvino_training_extensions
GatedLinearUnit
false
15,227
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
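Behind the two addmm calls and the fused sigmoid-mul, the module gates one linear projection with the sigmoid of another. An eager restatement with the entry's 4-unit sizes:

import torch
import torch.nn as nn

# sigmoid(w4 x) acts as a data-dependent gate on w5 x.
w4, w5 = nn.Linear(4, 4), nn.Linear(4, 4)
x = torch.rand(4, 4, 4, 4)
out = torch.sigmoid(w4(x)) * w5(x)
assert out.shape == x.shape

Note that this variant uses two independent projections, unlike nn.GLU, which splits a single projection in half along a dimension and gates one half with the other.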
Swish
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/to/ctoedq6ayv4ylzvthamtdtw5ftwiohuio4r66skctp43ksli3rqz.py # Topologically Sorted Source Nodes: [sigmoid, mul, sub, mul_1], Original ATen: [aten.sigmoid, aten.mul, aten.sub] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # sigmoid => sigmoid # sub => sub # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.20662096414), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 1.78718727865), kwargs = {}) triton_poi_fused_mul_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tmp3 = 0.20662096414 tmp4 = tmp2 - tmp3 tmp5 = 1.78718727865 tmp6 = tmp4 * tmp5 tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul, sub, mul_1], Original ATen: [aten.sigmoid, aten.mul, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data class Swish(nn.Module): def __init__(self): super(Swish, self).__init__() def forward(self, x): return 1.78718727865 * (x * torch.sigmoid(x) - 0.20662096414) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sigmoid_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tmp3 = 0.20662096414 tmp4 = tmp2 - tmp3 tmp5 = 1.78718727865 tmp6 = tmp4 * tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SwishNew(nn.Module): def __init__(self): super(SwishNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
doudoulaile/RL-GAN-Net
Swish
false
15,228
[ "MIT" ]
112
9c221223d1878bc24f0f39ad34928c1bb2974ae3
https://github.com/doudoulaile/RL-GAN-Net/tree/9c221223d1878bc24f0f39ad34928c1bb2974ae3
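The two magic constants in this Swish variant appear to re-center and re-scale silu(x) = x * sigmoid(x) toward zero mean and unit variance for a standard-normal input, in the same spirit as SELU-style self-normalization. A Monte-Carlo sketch to eyeball that reading (the interpretation is an assumption; the entry itself does not document the constants):

import torch

# If the constants standardize silu under N(0, 1), the printed mean and
# std should land near 0 and 1 respectively.
torch.manual_seed(0)
x = torch.randn(1_000_000)
y = 1.78718727865 * (x * torch.sigmoid(x) - 0.20662096414)
print(y.mean().item(), y.std().item())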
GNNLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wv/cwvsxo4q6wyoxpozsubbimmg6xvl34ow44hy6yl5mwa23uuy77sa.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # output_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + 
(x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm] extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf0, out=buf1) del buf0 buf2 = buf1; del buf1 # reuse buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf2, buf3, 16, grid=grid(16), stream=stream0) return (buf2, buf3, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch import torch.nn.functional as F from torch.nn.parameter import Parameter from torch.nn.modules.module import Module class GNNLayer(Module): def __init__(self, in_features, out_features): super(GNNLayer, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) torch.nn.init.xavier_uniform_(self.weight) def forward(self, features, adj, active=True): support = torch.mm(features, self.weight) output = torch.spmm(adj, support) if active: output = F.relu(output) return output def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module from torch.nn.parameter import Parameter from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) del buf0 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf2, buf3, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class GNNLayerNew(Module): def __init__(self, in_features, out_features): super(GNNLayerNew, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) torch.nn.init.xavier_uniform_(self.weight) def forward(self, input_0, input_1): primals_1 = self.weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
drzhang3/SDCN
GNNLayer
false
15,229
[ "Apache-2.0" ]
146
3d11365bcb4af2cbe9625362737f1224aeea3b72
https://github.com/drzhang3/SDCN/tree/3d11365bcb4af2cbe9625362737f1224aeea3b72
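The compiled graph for this layer is just its two matmuls plus the fused ReLU; the torch.spmm appears as a dense extern_kernels.mm in the generated call because get_inputs supplies a dense adjacency. A dense eager restatement, mirroring the entry's xavier_uniform_ weight init:

import torch

# Feature transform (X @ W), neighborhood aggregation (A @ ...), nonlinearity.
torch.manual_seed(0)
X = torch.rand(4, 4)                        # node features
A = torch.rand(4, 4)                        # adjacency, dense in this sketch
W = torch.empty(4, 4)
torch.nn.init.xavier_uniform_(W)
out = torch.relu(A @ (X @ W))
assert out.shape == (4, 4)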
RGBDiff
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/fl/cfllfuxu6opan6gqlsakp4ldgmsmzrsbicbfnt43sgrqiliz3dwx.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sub, %sub_1, %sub_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 3 x0 = xindex % 16 x2 = (xindex // 48) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 2, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 3, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp22 - tmp23 tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp19, tmp24, tmp25) tmp27 = tl.where(tmp13, tmp18, tmp26) tmp28 = tl.where(tmp4, tmp9, tmp27) tl.store(out_ptr0 + (x3), tmp28, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn


class RGBDiff(nn.Module):

    def __init__(self, dim=1):
        super().__init__()
        self.dim = dim

    def forward(self, image):
        """
        Args:
            image (torch.Tensor): (N x T x C x H x W)
        """
        diffs = []
        for i in range(1, image.size(self.dim)):
            prev = image.index_select(self.dim, image.new_tensor(i - 1,
                dtype=torch.long))
            current = image.index_select(self.dim, image.new_tensor(i,
                dtype=torch.long))
            diffs.append(current - prev)
        return torch.cat(diffs, dim=self.dim)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
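A minimal equivalence sketch (not from the source repo; RGBDiffSliced is a hypothetical name): the per-step index_select loop above can be collapsed into a single slice-and-subtract, which is effectively what the fused cat kernel below computes for dim=1.

import torch
from torch import nn


class RGBDiffSliced(nn.Module):
    """Hypothetical vectorized variant, for illustration only."""

    def __init__(self, dim=1):
        super().__init__()
        self.dim = dim

    def forward(self, image):
        # narrow() builds views of steps 1..T-1 and 0..T-2 along self.dim,
        # so one subtraction yields all T-1 consecutive differences.
        t = image.size(self.dim)
        return image.narrow(self.dim, 1, t - 1) - image.narrow(self.dim, 0, t - 1)


x = torch.rand(4, 4, 4, 4)
# Reproduce the original loop with keep-dim slices for comparison.
ref = torch.cat([x[:, i:i + 1] - x[:, i - 1:i] for i in range(1, x.size(1))], dim=1)
assert torch.allclose(RGBDiffSliced()(x), ref)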
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 3
    x0 = xindex % 16
    x2 = xindex // 48
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp7 = tmp5 - tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tmp11 = tl.full([1], 2, tl.int64)
    tmp12 = tmp0 < tmp11
    tmp13 = tmp10 & tmp12
    tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp13 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp13 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tmp14 - tmp15
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp13, tmp16, tmp17)
    tmp19 = tmp0 >= tmp11
    tl.full([1], 3, tl.int64)
    tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp19 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp23 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp19 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp24 = tmp22 - tmp23
    tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
    tmp26 = tl.where(tmp19, tmp24, tmp25)
    tmp27 = tl.where(tmp13, tmp18, tmp26)
    tmp28 = tl.where(tmp4, tmp9, tmp27)
    tl.store(out_ptr0 + x3, tmp28, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(192)](arg0_1, buf0, 192, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class RGBDiffNew(nn.Module):

    def __init__(self, dim=1):
        super().__init__()
        self.dim = dim

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
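A quick consistency check (illustrative, assuming the RGBDiff and RGBDiffNew classes above are importable and a CUDA device is available): the compiled wrapper performs the same subtractions inside one fused kernel, so its output should match the eager module.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = RGBDiff()(x)
fused = RGBDiffNew()(x)
torch.testing.assert_close(fused, eager)
print(tuple(fused.shape))  # (4, 3, 4, 4): one fewer step along dim=1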
dqawami/openvino_training_extensions
RGBDiff
false
15,230
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
StddevLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6t/c6tqcmavbxvodp7g3tqzdedwkq34iy2jx5qkzqf4icv2fg5hvbil.py # Topologically Sorted Source Nodes: [mean, y_1, pow_1, y_2, add, y_3, mean_2, z], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.cat] # Source node to ATen node mapping: # add => add # mean => mean # mean_2 => mean_2 # pow_1 => pow_1 # y_1 => sub # y_2 => mean_1 # y_3 => pow_2 # z => cat # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-08), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_2, [3, 4, 5], True), kwargs = {}) # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %view_1], 1), kwargs = {}) triton_per_fused_add_cat_mean_pow_sub_0 = async_compile.triton('triton_per_fused_add_cat_mean_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_cat_mean_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_cat_mean_pow_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr1 + (tl.broadcast_to(r1 + (80*r2), [XBLOCK, RBLOCK])), tmp28, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yi/cyidf2yj3fms5jdxlfe7fdijzfj6p5a5q2qxo4llkuxnpqh6fj5o.py # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] # Source node to ATen node mapping: # z => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %view_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (80*x1)), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias # Topologically Sorted Source Nodes: [mean, y_1, pow_1, y_2, add, y_3, mean_2, z], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.cat] stream0 = get_raw_stream(0) triton_per_fused_add_cat_mean_pow_sub_0.run(arg0_1, buf2, 1, 64, grid=grid(1), stream=stream0) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) # alias # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class StddevLayer(nn.Module):

    def __init__(self, group_size=4, num_new_features=1):
        super().__init__()
        # Keep the constructor arguments so non-default group sizes and
        # feature counts take effect.
        self.group_size = group_size
        self.num_new_features = num_new_features

    def forward(self, x):
        b, c, h, w = x.shape
        group_size = min(self.group_size, b)
        y = x.reshape([group_size, -1, self.num_new_features,
            c // self.num_new_features, h, w])
        y = y - y.mean(0, keepdim=True)
        y = (y ** 2).mean(0, keepdim=True)
        y = (y + 1e-08) ** 0.5
        y = y.mean([3, 4, 5], keepdim=True).squeeze(3)
        y = y.expand(group_size, -1, -1, h, w).clone().reshape(b,
            self.num_new_features, h, w)
        z = torch.cat([x, y], dim=1)
        return z


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
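Illustrative check (assuming the StddevLayer class above): the appended channel is one scalar statistic per group, broadcast over H x W, so with group_size equal to the batch size every sample receives the same extra feature map.

import torch

layer = StddevLayer(group_size=4, num_new_features=1)
x = torch.rand(4, 4, 4, 4)
z = layer(x)
print(tuple(z.shape))  # (4, 5, 4, 4): one stddev channel appended
extra = z[:, 4]
assert torch.allclose(extra, extra[0].expand_as(extra))
# Recompute the statistic directly: biased std over the batch, then the
# mean over channels and spatial positions.
ref = (x.var(0, unbiased=False) + 1e-08).sqrt().mean()
assert torch.allclose(extra[0, 0, 0], ref)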
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_cat_mean_pow_sub_0(in_ptr0, out_ptr1, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    r1 = rindex % 16
    r2 = rindex // 16
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr0 + (64 + r0), None)
    tmp3 = tl.load(in_ptr0 + (128 + r0), None)
    tmp5 = tl.load(in_ptr0 + (192 + r0), None)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-08
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.sqrt(tmp22)
    tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
    tmp26 = tl.sum(tmp24, 1)[:, None]
    tmp27 = 64.0
    tmp28 = tmp26 / tmp27
    tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]),
        tmp28, None)


@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    x1 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
        buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64)
        get_raw_stream(0)
        triton_per_fused_add_cat_mean_pow_sub_0[grid(1)](arg0_1, buf2, 1,
            64, XBLOCK=1, num_warps=2, num_stages=1)
        buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0)
        triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf3,


class StddevLayerNew(nn.Module):

    def __init__(self, group_size=4, num_new_features=1):
        super().__init__()
        # Keep the constructor arguments so non-default values take effect.
        self.group_size = group_size
        self.num_new_features = num_new_features

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
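Illustrative sketch of the aliasing trick in the wrapper above: rather than materializing torch.cat, call() allocates the (4, 5, 4, 4) output once and hands each kernel a reinterpret_tensor view into it (channel offset 64 for the stddev slot, offset 0 for the copy of the input). The same idea in eager PyTorch:

import torch

x = torch.rand(4, 4, 4, 4)
out = torch.empty(4, 5, 4, 4)
out[:, :4] = x          # counterpart of triton_poi_fused_cat_1
out[:, 4:] = 0.123      # counterpart of the fused stddev kernel
assert torch.equal(out, torch.cat([x, torch.full((4, 1, 4, 4), 0.123)], dim=1))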
dubtor/EditGAN-Robert
StddevLayer
false
15,231
[ "BSD-2-Clause" ]
110
8e6d80e7647c3536827f11cf0a9abf51c42794b2
https://github.com/dubtor/EditGAN-Robert/tree/8e6d80e7647c3536827f11cf0a9abf51c42794b2
Actor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rm/crmfikkxblrhxfynyknfm2x3wwcwtibkjkkbyhzwmxqi4kmwkosl.py # Topologically Sorted Source Nodes: [log_std_1], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] # Source node to ATen node mapping: # log_std_1 => clamp_max, clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_7, -20), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 2), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_7, -20), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_7, 2), kwargs = {}) # %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {}) triton_poi_fused_clamp_ge_le_logical_and_1 = async_compile.triton('triton_poi_fused_clamp_ge_le_logical_and_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_le_logical_and_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -20.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 2.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 >= tmp3 tmp8 = tmp2 <= tmp5 tmp9 = tmp7 & tmp8 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, 
primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 32), (32, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse buf9 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 2048, grid=grid(2048), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf2 # reuse buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf8, 2048, grid=grid(2048), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_8, (32, 4), (1, 32), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [log_std_1], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] triton_poi_fused_clamp_ge_le_logical_and_1.run(buf5, primals_9, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del primals_9 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(buf3, (64, 32), (32, 1), 0), buf7, primals_8, primals_6, buf8, primals_4, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32), (32, 1), device='cuda:0', 
dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal


def hidden_init(layer):
    fan_in = layer.weight.data.size()[0]
    lim = 1.0 / np.sqrt(fan_in)
    return -lim, lim


class Actor(nn.Module):
    """Actor (Policy) Model."""

    def __init__(self, state_size, action_size, seed, device, hidden_size=32,
            init_w=0.003, log_std_min=-20, log_std_max=2):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            hidden_size (int): Number of nodes in each of the two hidden layers
            init_w (float): Range of the uniform init for the output layers
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        self.init_w = init_w
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.mu = nn.Linear(hidden_size, action_size)
        self.log_std_linear = nn.Linear(hidden_size, action_size)

    def reset_parameters(self):
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        # Use the init_w captured at construction time.
        self.mu.weight.data.uniform_(-self.init_w, self.init_w)
        self.log_std_linear.weight.data.uniform_(-self.init_w, self.init_w)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        mu = self.mu(x)
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
        return mu, log_std

    def evaluate(self, state, epsilon=1e-06):
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        log_prob = (dist.log_prob(e) - torch.log(1 - action.pow(2) + epsilon)
            ).sum(1, keepdim=True)
        return action, log_prob

    def get_action(self, state):
        """
        returns the action based on a squashed gaussian policy.
        That means the samples are obtained according to:
        a(s,e) = tanh(mu(s) + sigma(s) * e)
        """
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        return action.detach().cpu()

    def get_det_action(self, state):
        mu, _log_std = self.forward(state)
        return torch.tanh(mu).detach().cpu()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'action_size': 4, 'seed': 4, 'device': 0}]
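Usage sketch (assuming the Actor class above; the device argument is unused by forward, so 0 merely satisfies the constructor): evaluate() draws a reparameterized sample, squashes it with tanh, and subtracts log(1 - a^2) to correct the Gaussian log-density for the squashing.

import torch

actor = Actor(state_size=4, action_size=4, seed=4, device=0)
state = torch.rand(8, 4)                  # batch of 8 states
mu, log_std = actor(state)                # both (8, 4); log_std clamped to [-20, 2]
action, log_prob = actor.evaluate(state)
assert action.abs().max() <= 1.0          # tanh keeps actions in [-1, 1]
print(log_prob.shape)                     # torch.Size([8, 1])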
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
from torch.distributions import Normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = -20.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 2.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp2 >= tmp3
    tmp8 = tmp2 <= tmp5
    tmp9 = tmp7 & tmp8
    tl.store(out_ptr0 + x2, tmp6, xmask)
    tl.store(out_ptr1 + x2, tmp9, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (32, 4), (4, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (32, 32), (32, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (4, 32), (32, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 32), (32, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0)
        del buf0
        buf9 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1,
            primals_2, buf9, 2048, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0),
            reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0)
        del buf2
        buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3,
            primals_5, buf8, 2048, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32),
            (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0),
            reinterpret_tensor(primals_8, (32, 4), (1, 32), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_clamp_ge_le_logical_and_1[grid(256)](buf5,
            primals_9, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf5
        del primals_9
    return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf6,
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 32), (32, 1), 0),
        reinterpret_tensor(buf3, (64, 32), (32, 1), 0), buf7, primals_8,
        primals_6, buf8, primals_4, buf9)


def hidden_init(layer):
    fan_in = layer.weight.data.size()[0]
    lim = 1.0 / np.sqrt(fan_in)
    return -lim, lim


class ActorNew(nn.Module):
    """Actor (Policy) Model."""

    def __init__(self, state_size, action_size, seed, device, hidden_size=32,
            init_w=0.003, log_std_min=-20, log_std_max=2):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            hidden_size (int): Number of nodes in each of the two hidden layers
            init_w (float): Range of the uniform init for the output layers
        """
        super(ActorNew, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        self.init_w = init_w
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.mu = nn.Linear(hidden_size, action_size)
        self.log_std_linear = nn.Linear(hidden_size, action_size)

    def reset_parameters(self):
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        # Use the init_w captured at construction time.
        self.mu.weight.data.uniform_(-self.init_w, self.init_w)
        self.log_std_linear.weight.data.uniform_(-self.init_w, self.init_w)

    def evaluate(self, state, epsilon=1e-06):
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        log_prob = (dist.log_prob(e) - torch.log(1 - action.pow(2) + epsilon)
            ).sum(1, keepdim=True)
        return action, log_prob

    def get_action(self, state):
        """
        returns the action based on a squashed gaussian policy.
        That means the samples are obtained according to:
        a(s,e) = tanh(mu(s) + sigma(s) * e)
        """
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        return action.detach().cpu()

    def get_det_action(self, state):
        mu, _log_std = self.forward(state)
        return torch.tanh(mu).detach().cpu()

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.mu.weight
        primals_7 = self.mu.bias
        primals_8 = self.log_std_linear.weight
        primals_9 = self.log_std_linear.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0], output[1]
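What triton_poi_fused_relu_threshold_backward_0 produces, restated in eager PyTorch (illustrative only): since this is the '0_forward' graph, the kernel stores the ReLU activation in place and also saves a boolean mask (activation <= 0, the buf8/buf9 tensors above) that the separately generated backward graph uses to zero gradients.

import torch

pre = torch.randn(64, 32)                  # matmul output plus bias
act = torch.relu(pre)                      # written back into the same buffer
mask = act <= 0                            # saved for the backward pass
grad_out = torch.randn_like(act)
grad_in = grad_out.masked_fill(mask, 0.0)  # what threshold_backward computes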
drib861204/Soft-Actor-Critic-and-Extensions
Actor
false
15,232
[ "MIT" ]
143
3075df7430c1c49177b3798d753a9e3f6226672e
https://github.com/drib861204/Soft-Actor-Critic-and-Extensions/tree/3075df7430c1c49177b3798d753a9e3f6226672e
PositionWiseFeedForward
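The kernels below fuse the module's ELU feed-forward with two sigmoid-gated residual branches, each followed by LayerNorm (the 'grn' node names point at a gated-residual-network pattern). The gating step in eager PyTorch, as an illustrative sketch with made-up tensors:

import torch
import torch.nn.functional as F

gate = torch.rand(4, 4)      # output of one linear projection
value = torch.rand(4, 4)     # output of a second linear projection
residual = torch.rand(4, 4)  # skip-connection input
out = F.layer_norm(residual + torch.sigmoid(gate) * value, normalized_shape=(4,))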
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ck/cck6zsxedo53nyj2po2pvkfjvrr75ansuu3rjjhu6zyrx6xzssqo.py # Topologically Sorted Source Nodes: [n2], Original ATen: [aten.elu] # Source node to ATen node mapping: # n2 => expm1, gt, mul, mul_2, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/en/cenabni3h77zazokaukc6skf7kuv4ta2awact6sfvxbq7che2ucu.py # Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # grn => var_mean # sigmoid => sigmoid # x_1 => mul_3 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_7), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_3), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_mul_native_layer_norm_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (1 + 
(4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp8 = tl.sigmoid(tmp7) tmp10 = tmp8 * tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp15 = tl.sigmoid(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp22 = tl.sigmoid(tmp21) tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uj/cuj7uutcqmly235uixsc4lxbmmowhsqrjmbzktxbwhqmb7hgix5q.py # Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # grn => add_1, add_2, mul_4, mul_5, rsqrt, sub # sigmoid => sigmoid # x_1 => mul_3 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_7), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_10), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_11), kwargs = {}) triton_poi_fused_add_mul_native_layer_norm_sigmoid_2 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sa/csam4qbluetpmxn3expxgvgvulpbjidzlkrgpt7lcqb7aa4p6d5x.py # Topologically Sorted Source Nodes: [sigmoid_1, x_3, add_1, out], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add_1 => add_3 # out => var_mean_1 # sigmoid_1 => sigmoid_1 # x_3 => mul_6 # Graph fragment: # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_9,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %view_11), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_16), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_mul_native_layer_norm_sigmoid_3 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tmp7 = tl.sigmoid(tmp6) tmp9 = tmp7 * tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp14 = tl.sigmoid(tmp13) tmp16 = tmp14 * tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp21 = tl.sigmoid(tmp20) tmp23 = tmp21 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bw/cbw3jqoeotoyoa3d4nplpzdidkkeatsdk7ho5hysa2lq3gbdtqjv.py # Topologically Sorted Source Nodes: [sigmoid_1, x_3, add_1, out], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add_1 => add_3 # out => add_4, add_5, mul_7, mul_8, rsqrt_1, sub_1 # sigmoid_1 => sigmoid_1 # x_3 => mul_6 # Graph fragment: # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_9,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %view_11), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_16), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs 
= {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %primals_17), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, %primals_18), kwargs = {}) triton_poi_fused_add_mul_native_layer_norm_sigmoid_4 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_17, (4, ), (1, )) assert_size_stride(primals_18, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [n2], Original ATen: [aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_elu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [n1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_9 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_sigmoid_1.run(primals_3, buf3, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_sigmoid_2.run(primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7, 256, grid=grid(256), stream=stream0) del primals_11 buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_13 buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_15, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_15 buf10 = 
buf6; del buf6 # reuse buf11 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [sigmoid_1, x_3, add_1, out], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_sigmoid_3.run(buf8, buf9, primals_16, buf10, buf11, 64, grid=grid(64), stream=stream0) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_1, x_3, add_1, out], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_sigmoid_4.run(buf8, buf9, primals_16, buf10, buf11, primals_17, primals_18, buf12, 256, grid=grid(256), stream=stream0) del buf10 del buf11 del primals_18 return (buf12, primals_3, primals_10, primals_16, primals_17, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, buf3, buf4, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf8, buf9, primals_14, primals_12, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
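The generated `call` wrapper above expects its 18 parameters flattened in exactly the order checked by `assert_size_stride`. A minimal smoke-test sketch, assuming the generated module above is importable and a CUDA device is present; `smoke_test` is a hypothetical helper, not part of the generated file:

import torch

def smoke_test(call):
    # Shapes mirror the assert_size_stride checks inside the generated `call`;
    # primals_3 and primals_16 are the two (4, 4, 4, 4) activations, the rest
    # are the 4x4 weights and length-4 biases of the Linear/LayerNorm layers.
    shapes = [(4, 4), (4,), (4, 4, 4, 4), (4, 4), (4,), (4, 4), (4,),
              (4, 4), (4,), (4,), (4,), (4, 4), (4,), (4, 4), (4,),
              (4, 4, 4, 4), (4,), (4,)]
    args = [torch.randn(s, device='cuda:0') for s in shapes]
    out = call(args)  # note: `call` clears the argument list it receives
    assert out[0].shape == (4, 4, 4, 4)
    return out[0]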
import torch from torch.nn import functional as F from torch import nn from torchvision import models as models import torch.onnx import torch.nn class GatedLinearUnit(nn.Module): def __init__(self, input_size, output_size, dropout=0): super().__init__() self.dropout = nn.Dropout(dropout) self.w4 = nn.Linear(input_size, output_size) self.w5 = nn.Linear(input_size, output_size) self.act = nn.Sigmoid() def forward(self, x): x = self.dropout(x) x = self.act(self.w4(x)) * self.w5(x) return x class GateAddNorm(nn.Module): def __init__(self, input_size, output_size, dropout): super().__init__() self.glu = GatedLinearUnit(input_size, output_size, dropout) self.norm = nn.LayerNorm(output_size) def forward(self, x, skip): return self.norm(self.glu(x) + skip) class GatedResidualNetwork(nn.Module): def __init__(self, input_size, hidden_size, output_size, context_size= None, dropout=0): super().__init__() self.w1 = nn.Linear(hidden_size, hidden_size) self.w2 = nn.Linear(input_size, hidden_size) self.w3 = None if context_size is None else nn.Linear(context_size, hidden_size, bias=False) self.glu = GatedLinearUnit(hidden_size, output_size, dropout) self.layer_norm = nn.LayerNorm(output_size) self.residual = nn.Sequential( ) if input_size == output_size else nn.Linear(input_size, output_size) def forward(self, a, c=None): if c is not None: n2 = F.elu(self.w2(a) + self.w3(c)) else: n2 = F.elu(self.w2(a)) n1 = self.w1(n2) grn = self.layer_norm(self.residual(a) + self.glu(n1)) return grn class PositionWiseFeedForward(nn.Module): def __init__(self, input_size, output_size, dropout): super().__init__() self.grn = GatedResidualNetwork(input_size=input_size, hidden_size= input_size, output_size=output_size, dropout=dropout) self.gate_add_norm = GateAddNorm(input_size=input_size, output_size =output_size, dropout=dropout) def forward(self, x, skip): out = self.grn(x) out = self.gate_add_norm(out, skip) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4, 'dropout': 0.5}]
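For orientation, the eager module above can be driven with the factory helpers it ships. A minimal sketch, assuming the classes and helpers from the listing are in scope; `.eval()` matters because dropout=0.5 would otherwise perturb the output:

import torch

init_args, init_kwargs = get_init_inputs()  # ([], {'input_size': 4, 'output_size': 4, 'dropout': 0.5})
ffn = PositionWiseFeedForward(*init_args, **init_kwargs).eval()
x, skip = get_inputs()                      # two (4, 4, 4, 4) tensors
with torch.no_grad():
    out = ffn(x, skip)
print(out.shape)                            # torch.Size([4, 4, 4, 4])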
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import functional as F from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tl.sigmoid(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp8 = tl.sigmoid(tmp7) tmp10 = tmp8 * tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp15 = tl.sigmoid(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp22 = tl.sigmoid(tmp21) tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x2, xmask) tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, 
xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tmp7 = tl.sigmoid(tmp6) tmp9 = tmp7 * tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp14 = tl.sigmoid(tmp13) tmp16 = tmp14 * tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp21 = tl.sigmoid(tmp20) tmp23 = tmp21 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x2, xmask) tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 - tmp6 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tmp12 = tmp7 * tmp11 tmp14 = tmp12 * tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) 
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_9 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(64)]( primals_3, buf3, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_sigmoid_2[grid(256)]( primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_13 buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_15, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_15 buf10 = buf6 del buf6 buf11 = buf5 del buf5 triton_poi_fused_add_mul_native_layer_norm_sigmoid_3[grid(64)](buf8, buf9, primals_16, buf10, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_sigmoid_4[grid(256)](buf8, buf9, primals_16, buf10, buf11, primals_17, primals_18, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf10 del buf11 del primals_18 return (buf12, primals_3, primals_10, primals_16, primals_17, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, buf3, buf4, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf8, buf9, primals_14, primals_12, primals_8, primals_6, primals_4) class 
GatedLinearUnit(nn.Module): def __init__(self, input_size, output_size, dropout=0): super().__init__() self.dropout = nn.Dropout(dropout) self.w4 = nn.Linear(input_size, output_size) self.w5 = nn.Linear(input_size, output_size) self.act = nn.Sigmoid() def forward(self, x): x = self.dropout(x) x = self.act(self.w4(x)) * self.w5(x) return x class GateAddNorm(nn.Module): def __init__(self, input_size, output_size, dropout): super().__init__() self.glu = GatedLinearUnit(input_size, output_size, dropout) self.norm = nn.LayerNorm(output_size) def forward(self, x, skip): return self.norm(self.glu(x) + skip) class GatedResidualNetwork(nn.Module): def __init__(self, input_size, hidden_size, output_size, context_size= None, dropout=0): super().__init__() self.w1 = nn.Linear(hidden_size, hidden_size) self.w2 = nn.Linear(input_size, hidden_size) self.w3 = None if context_size is None else nn.Linear(context_size, hidden_size, bias=False) self.glu = GatedLinearUnit(hidden_size, output_size, dropout) self.layer_norm = nn.LayerNorm(output_size) self.residual = nn.Sequential( ) if input_size == output_size else nn.Linear(input_size, output_size) def forward(self, a, c=None): if c is not None: n2 = F.elu(self.w2(a) + self.w3(c)) else: n2 = F.elu(self.w2(a)) n1 = self.w1(n2) grn = self.layer_norm(self.residual(a) + self.glu(n1)) return grn class PositionWiseFeedForwardNew(nn.Module): def __init__(self, input_size, output_size, dropout): super().__init__() self.grn = GatedResidualNetwork(input_size=input_size, hidden_size= input_size, output_size=output_size, dropout=dropout) self.gate_add_norm = GateAddNorm(input_size=input_size, output_size =output_size, dropout=dropout) def forward(self, input_0, input_1): primals_1 = self.grn.w1.weight primals_2 = self.grn.w1.bias primals_4 = self.grn.w2.weight primals_5 = self.grn.w2.bias primals_6 = self.grn.glu.w4.weight primals_7 = self.grn.glu.w4.bias primals_8 = self.grn.glu.w5.weight primals_9 = self.grn.glu.w5.bias primals_10 = self.grn.layer_norm.weight primals_11 = self.grn.layer_norm.bias primals_12 = self.gate_add_norm.glu.w4.weight primals_13 = self.gate_add_norm.glu.w4.bias primals_14 = self.gate_add_norm.glu.w5.weight primals_15 = self.gate_add_norm.glu.w5.bias primals_17 = self.gate_add_norm.norm.weight primals_18 = self.gate_add_norm.norm.bias primals_3 = input_0 primals_16 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
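A plausible parity check between the eager `PositionWiseFeedForward` and the Inductor-backed `PositionWiseFeedForwardNew` above, sketched under the assumption that both classes are in scope and CUDA is available (the fused path contains no dropout op, so both modules are put in eval mode):

import torch

torch.manual_seed(0)
init_args, init_kwargs = get_init_inputs()
eager = PositionWiseFeedForward(*init_args, **init_kwargs).cuda().eval()
fused = PositionWiseFeedForwardNew(*init_args, **init_kwargs).cuda().eval()
fused.load_state_dict(eager.state_dict())  # identical weights in both modules
x, skip = (t.cuda() for t in get_inputs())
with torch.no_grad():
    print(torch.allclose(eager(x, skip), fused(x, skip), atol=1e-5))  # expected: True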
dqawami/openvino_training_extensions
PositionWiseFeedForward
false
15233
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
PositionwiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/i3/ci3nuuurbsrmcufle642yc7udhwn4itsu6aptfssij5nzrnylpne.py # Topologically Sorted Source Nodes: [conv1d, output], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv1d => convolution # output => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/lf/clf7hs52i4bd5d3e73uio27ntyjfqmszkbsw6dta3r6rzgeftva3.py # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/in/ciniyjn7eyz6kfao5xoph2rbugonh4ujhobeqsni3egmy2cyb6jq.py # Topologically Sorted Source Nodes: [add, mu, sigma], Original ATen: [aten.add, aten.mean, aten.std] # Source node to ATen node mapping: # add => add # mu => mean # sigma => var # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add, [-1]), kwargs = {correction: 1.0, keepdim: True}) triton_poi_fused_add_mean_std_3 = async_compile.triton('triton_poi_fused_add_mean_std_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_std_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp8 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + (x2), tmp29, xmask) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3p/c3pxygonyvwt7htiobzn7yqzmectxzeqvh7ezkgsvmrrsjmztpuc.py # Topologically Sorted Source Nodes: [add, sub, add_1, ln_out, mul, ln_out_1], Original ATen: [aten.add, aten.sub, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # ln_out => div # ln_out_1 => add_2 # mul => mul # sub => sub # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %expand), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, 0.001), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %expand_2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_3), kwargs = {}) triton_poi_fused_add_div_mul_sub_4 = async_compile.triton('triton_poi_fused_add_div_mul_sub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (y0), ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = libdevice.sqrt(tmp5) tmp7 = 0.001 tmp8 = tmp6 + tmp7 tmp9 = tmp4 / tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + (4*y3)), tmp13, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv1d, output], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), 
dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf4, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf6 = buf5; del buf5 # reuse buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add, mu, sigma], Original ATen: [aten.add, aten.mean, aten.std] triton_poi_fused_add_mean_std_3.run(buf6, buf4, primals_1, buf7, 16, grid=grid(16), stream=stream0) buf8 = reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add, sub, add_1, ln_out, mul, ln_out_1], Original ATen: [aten.add, aten.sub, aten.div, aten.mul] triton_poi_fused_add_div_mul_sub_4.run(buf4, primals_1, buf7, buf6, primals_6, primals_7, buf8, 16, 4, grid=grid(16, 4), stream=stream0) del buf6 del buf7 del primals_7 return (buf8, primals_1, primals_2, primals_4, primals_6, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
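One detail of the generated code above worth noting: `triton_poi_fused_add_mean_std_3` divides the summed squared deviations by 3.0, i.e. N - 1 for the size-4 last dimension, reproducing `torch.std`'s default Bessel correction, and the epsilon is added to sigma itself rather than to the variance. A small sketch confirming the statistics it mirrors:

import torch

# The fused kernel stores the mean (tmp16) and the unbiased variance (tmp29)
# per row; the follow-up kernel then computes (z - mu) / (sqrt(var) + 0.001).
z = torch.randn(4, 4, 4)
mu = z.mean(dim=-1, keepdim=True)
var = ((z - mu) ** 2).sum(dim=-1, keepdim=True) / (z.size(-1) - 1)  # divide by N - 1 = 3
print(torch.allclose(var.sqrt(), z.std(dim=-1, keepdim=True)))      # True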
import torch from torch import nn from torchvision import models as models import torch.onnx import torch.nn class Identity(nn.Module): def forward(self, input_): return input_ class LayerNormalization(nn.Module): """ Layer normalization module """ def __init__(self, d_hid, eps=0.001): super(LayerNormalization, self).__init__() self.eps = eps self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True) self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True) def forward(self, z): if z.size(1) == 1: return z mu = torch.mean(z, keepdim=True, dim=-1) sigma = torch.std(z, keepdim=True, dim=-1) ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps) ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as( ln_out) return ln_out class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_hid, d_inner_hid, dropout=0.1, layer_norm=True): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1) self.w_2 = nn.Conv1d(d_inner_hid, d_hid, 1) self.layer_norm = LayerNormalization(d_hid ) if layer_norm else Identity() self.dropout = nn.Dropout(dropout) self.relu = nn.ReLU() def forward(self, x): residual = x output = self.relu(self.w_1(x.transpose(1, 2))) output = self.w_2(output).transpose(2, 1) output = self.dropout(output) return self.layer_norm(output + residual) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_hid': 4, 'd_inner_hid': 4}]
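As a quick reference for the module above: the two 1x1 `Conv1d` layers act as position-wise linear maps over the channel dimension, which is why the input is transposed to (batch, d_hid, seq) and back. A minimal eager run, assuming the classes and `get_inputs` from the listing are in scope:

import torch

ffn = PositionwiseFeedForward(d_hid=4, d_inner_hid=4).eval()  # eval() disables the 0.1 dropout
x = get_inputs()[0]          # (batch=4, seq=4, d_hid=4)
with torch.no_grad():
    y = ffn(x)
print(y.shape)               # torch.Size([4, 4, 4])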
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torchvision import models as models import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + x2, tmp29, xmask) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, 
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr3 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = libdevice.sqrt(tmp5) tmp7 = 0.001 tmp8 = tmp6 + tmp7 tmp9 = tmp4 / tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_mean_std_3[grid(16)](buf6, buf4, primals_1, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0) del buf0 triton_poi_fused_add_div_mul_sub_4[grid(16, 4)](buf4, primals_1, buf7, buf6, primals_6, primals_7, buf8, 16, 4, XBLOCK=4, YBLOCK =16, num_warps=1, num_stages=1) del buf6 del buf7 del primals_7 return buf8, primals_1, primals_2, primals_4, primals_6, buf2, buf4 class Identity(nn.Module): def forward(self, input_): return input_ class LayerNormalization(nn.Module): """ Layer normalization module """ def __init__(self, d_hid, eps=0.001): super(LayerNormalization, self).__init__() self.eps = eps self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True) self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True) def forward(self, z): if z.size(1) == 1: return z mu = torch.mean(z, keepdim=True, dim=-1) sigma = torch.std(z, keepdim=True, dim=-1) ln_out = (z - mu.expand_as(z)) 
/ (sigma.expand_as(z) + self.eps) ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as( ln_out) return ln_out class PositionwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_hid, d_inner_hid, dropout=0.1, layer_norm=True): super(PositionwiseFeedForwardNew, self).__init__() self.w_1 = nn.Conv1d(d_hid, d_inner_hid, 1) self.w_2 = nn.Conv1d(d_inner_hid, d_hid, 1) self.layer_norm = LayerNormalization(d_hid ) if layer_norm else Identity() self.dropout = nn.Dropout(dropout) self.relu = nn.ReLU() def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_6 = self.layer_norm.a_2 primals_7 = self.layer_norm.b_2 primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
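And the matching parity sketch for this pair, under the same assumptions as before (both classes in scope, CUDA available, eval mode so the eager dropout is inert like the fused path):

import torch

torch.manual_seed(0)
eager = PositionwiseFeedForward(4, 4).cuda().eval()
fused = PositionwiseFeedForwardNew(4, 4).cuda().eval()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), fused(x), atol=1e-5))  # expected: True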
dqawami/openvino_training_extensions
PositionwiseFeedForward
false
15234
[ "Apache-2.0" ]
256
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
https://github.com/dqawami/openvino_training_extensions/tree/dddda1dfd651eaae2d59cecda84275b1b03bd0ad
C3D
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/62/c62gomswfhcqbctmskm64fye6pj3e5wayyyjyf4exb6oeym7ssid.py # Topologically Sorted Source Nodes: [conv3d, h], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv3d => convolution # h => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[67108864], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 51380224 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 200704) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uy/cuysustlzk2y4eskrbjr6wdtafn4jfib34ptgmml47efk5bqty7x.py # Topologically Sorted Source Nodes: [conv3d_1, h_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv3d_1 => convolution_1 # h_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[33554432], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25690112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 50176) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6d/c6dpfwxvgkmexxvtt4mw3jpq6momekrbxyubd7p4uce5hwniltmc.py # Topologically Sorted Source Nodes: [conv3d_2, h_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv3d_2 => convolution_2 # h_4 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 
1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6422528 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 6272) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7w/c7wsbmun2kousjqh3kbsght3kk4bxvs4vm5omeppffzoibl67cnm.py # Topologically Sorted Source Nodes: [conv3d_4, h_7], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv3d_4 => convolution_4 # h_7 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_10, %primals_11, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1605632 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 784) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ko/ckogoscouok75fhbjm7yd6kjtfx6rymxoihvncd6m5dlvtqlidus.py # Topologically Sorted Source Nodes: [conv3d_6, h_10], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv3d_6 => convolution_6 # h_10 => relu_6 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_14, %primals_15, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 200704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 98) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bk/cbkqnbcha27zstr2wxwko4mejuufux5otm53ft5f5dgipxz4owhb.py # Topologically Sorted Source Nodes: [h_14], Original ATen: [aten.relu] # Source node to ATen node mapping: # h_14 => relu_8 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_19), kwargs = {}) # %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4096 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 = args args.clear() assert_size_stride(primals_1, (4, 3, 16, 112, 112), (602112, 200704, 12544, 112, 1)) assert_size_stride(primals_2, (64, 3, 3, 3, 3), (81, 27, 9, 3, 1)) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (128, 64, 3, 3, 3), (1728, 
27, 9, 3, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (256, 128, 3, 3, 3), (3456, 27, 9, 3, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (256, 256, 3, 3, 3), (6912, 27, 9, 3, 1)) assert_size_stride(primals_9, (256, ), (1, )) assert_size_stride(primals_10, (512, 256, 3, 3, 3), (6912, 27, 9, 3, 1)) assert_size_stride(primals_11, (512, ), (1, )) assert_size_stride(primals_12, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1)) assert_size_stride(primals_13, (512, ), (1, )) assert_size_stride(primals_14, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1)) assert_size_stride(primals_15, (512, ), (1, )) assert_size_stride(primals_16, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1)) assert_size_stride(primals_17, (512, ), (1, )) assert_size_stride(primals_18, (4096, 8192), (8192, 1)) assert_size_stride(primals_19, (4096, ), (1, )) assert_size_stride(primals_20, (4096, 4096), (4096, 1)) assert_size_stride(primals_21, (4096, ), (1, )) assert_size_stride(primals_22, (174, 4096), (4096, 1)) assert_size_stride(primals_23, (174, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 16, 112, 112), (12845056, 200704, 12544, 112, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv3d, h], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 51380224, grid=grid(51380224), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.max_pool3d_with_indices] buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [1, 2, 2], [1, 2, 2]) buf3 = buf2[0] buf4 = buf2[1] del buf2 # Topologically Sorted Source Nodes: [conv3d_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 16, 56, 56), (6422528, 50176, 3136, 56, 1)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [conv3d_1, h_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf6, primals_5, 25690112, grid=grid(25690112), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.max_pool3d_with_indices] buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [2, 2, 2], [2, 2, 2]) buf8 = buf7[0] buf9 = buf7[1] del buf7 # Topologically Sorted Source Nodes: [conv3d_2], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 8, 28, 28), (1605632, 6272, 784, 28, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [conv3d_2, h_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf11, primals_7, 6422528, grid=grid(6422528), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv3d_3], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), 
transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 8, 28, 28), (1605632, 6272, 784, 28, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [conv3d_3, h_5], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf13, primals_9, 6422528, grid=grid(6422528), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [h_6], Original ATen: [aten.max_pool3d_with_indices] buf14 = torch.ops.aten.max_pool3d_with_indices.default(buf13, [2, 2, 2], [2, 2, 2]) buf15 = buf14[0] buf16 = buf14[1] del buf14 # Topologically Sorted Source Nodes: [conv3d_4], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 512, 4, 14, 14), (401408, 784, 196, 14, 1)) buf18 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [conv3d_4, h_7], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf18, primals_11, 1605632, grid=grid(1605632), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv3d_5], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 512, 4, 14, 14), (401408, 784, 196, 14, 1)) buf20 = buf19; del buf19 # reuse # Topologically Sorted Source Nodes: [conv3d_5, h_8], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf20, primals_13, 1605632, grid=grid(1605632), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.max_pool3d_with_indices] buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [2, 2, 2], [2, 2, 2]) buf22 = buf21[0] buf23 = buf21[1] del buf21 # Topologically Sorted Source Nodes: [conv3d_6], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 2, 7, 7), (50176, 98, 49, 7, 1)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [conv3d_6, h_10], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf25, primals_15, 200704, grid=grid(200704), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [conv3d_7], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 512, 2, 7, 7), (50176, 98, 49, 7, 1)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [conv3d_7, h_11], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf27, primals_17, 200704, grid=grid(200704), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [h_12], Original ATen: [aten.max_pool3d_with_indices] buf28 = torch.ops.aten.max_pool3d_with_indices.default(buf27, [2, 2, 2], [2, 2, 2], [0, 1, 1]) buf29 = buf28[0] buf30 = buf28[1] del buf28 buf31 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf29, (4, 
8192), (8192, 1), 0), reinterpret_tensor(primals_18, (8192, 4096), (1, 8192), 0), out=buf31) buf32 = buf31; del buf31 # reuse # Topologically Sorted Source Nodes: [h_14], Original ATen: [aten.relu] triton_poi_fused_relu_5.run(buf32, primals_19, 16384, grid=grid(16384), stream=stream0) del primals_19 buf33 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf32, reinterpret_tensor(primals_20, (4096, 4096), (1, 4096), 0), out=buf33) buf34 = buf33; del buf33 # reuse # Topologically Sorted Source Nodes: [h_16], Original ATen: [aten.relu] triton_poi_fused_relu_5.run(buf34, primals_21, 16384, grid=grid(16384), stream=stream0) del primals_21 buf35 = empty_strided_cuda((4, 174), (174, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_23, buf34, reinterpret_tensor(primals_22, (4096, 174), (1, 4096), 0), alpha=1, beta=1, out=buf35) del primals_23 return (buf35, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_1, buf1, buf3, buf4, buf6, buf8, buf9, buf11, buf13, buf15, buf16, buf18, buf20, buf22, buf23, buf25, buf27, buf30, reinterpret_tensor(buf29, (4, 8192), (8192, 1), 0), buf32, buf34, primals_22, primals_20, primals_18, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 16, 112, 112), (602112, 200704, 12544, 112, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 3, 3, 3, 3), (81, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 64, 3, 3, 3), (1728, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 128, 3, 3, 3), (3456, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, 256, 3, 3, 3), (6912, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((512, 256, 3, 3, 3), (6912, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((512, 512, 3, 3, 3), (13824, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((512, 512, 3, 3, 3), (13824, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((512, 512, 3, 3, 3), (13824, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4096, 8192), (8192, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4096, 4096), (4096, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((174, 4096), (4096, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((174, ), (1, 
), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
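# Editor's note (illustrative sketch, not part of the generated module): every conv
# block in the graph above follows the same pattern -- the 3D convolution itself is
# dispatched to cuDNN via extern_kernels.convolution with bias=None, and a single
# in-place Triton kernel then applies the per-channel bias add fused with the ReLU.
# A minimal eager-mode equivalent of that fused epilogue; the helper name is
# hypothetical:
import torch

def bias_relu_epilogue(conv_out: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
    # conv_out: (N, C, D, H, W) output of a bias-free conv3d; bias: (C,).
    # Broadcasting the bias over N/D/H/W mirrors the x1 = xindex // spatial % C
    # channel indexing used by the triton_poi_fused_convolution_relu_* kernels.
    return torch.relu(conv_out + bias.view(1, -1, 1, 1, 1))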
import random

import torch
import torchvision
import torch.nn.parallel
import torch.optim
from PIL import Image, ImageOps
from torch import nn


class GroupMultiScaleCrop(object):
    """Crops a group of PIL images at one of several scales, then resizes every
    crop back to the network input size."""

    def __init__(self, input_size, scales=None, max_distort=1, fix_crop=True,
                 more_fix_crop=True):
        self.scales = scales if scales is not None else [1, 0.875, 0.75, 0.66]
        self.max_distort = max_distort
        self.fix_crop = fix_crop
        self.more_fix_crop = more_fix_crop
        self.input_size = input_size if not isinstance(input_size, int) else [
            input_size, input_size]
        self.interpolation = Image.BILINEAR

    def __call__(self, img_group):
        im_size = img_group[0].size
        crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size)
        crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w,
            offset_h + crop_h)) for img in img_group]
        ret_img_group = [img.resize((self.input_size[0], self.input_size[1]),
            self.interpolation) for img in crop_img_group]
        return ret_img_group

    def _sample_crop_size(self, im_size):
        image_w, image_h = im_size[0], im_size[1]
        base_size = min(image_w, image_h)
        crop_sizes = [int(base_size * x) for x in self.scales]
        # Snap crop sizes within 3 px of the target size to the target itself.
        crop_h = [(self.input_size[1] if abs(x - self.input_size[1]) < 3 else
            x) for x in crop_sizes]
        crop_w = [(self.input_size[0] if abs(x - self.input_size[0]) < 3 else
            x) for x in crop_sizes]
        pairs = []
        for i, h in enumerate(crop_h):
            for j, w in enumerate(crop_w):
                if abs(i - j) <= self.max_distort:
                    pairs.append((w, h))
        crop_pair = random.choice(pairs)
        if not self.fix_crop:
            w_offset = random.randint(0, image_w - crop_pair[0])
            h_offset = random.randint(0, image_h - crop_pair[1])
        else:
            w_offset, h_offset = self._sample_fix_offset(image_w, image_h,
                crop_pair[0], crop_pair[1])
        return crop_pair[0], crop_pair[1], w_offset, h_offset

    def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h):
        offsets = self.fill_fix_offset(self.more_fix_crop, image_w, image_h,
            crop_w, crop_h)
        return random.choice(offsets)

    @staticmethod
    def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
        w_step = (image_w - crop_w) // 4
        h_step = (image_h - crop_h) // 4
        ret = list()
        ret.append((0, 0))
        ret.append((4 * w_step, 0))
        ret.append((0, 4 * h_step))
        ret.append((4 * w_step, 4 * h_step))
        ret.append((2 * w_step, 2 * h_step))
        if more_fix_crop:
            ret.append((0, 2 * h_step))
            ret.append((4 * w_step, 2 * h_step))
            ret.append((2 * w_step, 4 * h_step))
            ret.append((2 * w_step, 0 * h_step))
            ret.append((1 * w_step, 1 * h_step))
            ret.append((3 * w_step, 1 * h_step))
            ret.append((1 * w_step, 3 * h_step))
            ret.append((3 * w_step, 3 * h_step))
        return ret


class GroupRandomHorizontalFlip(object):
    """Randomly horizontally flips the given PIL.Image with a probability of 0.5."""

    def __init__(self, is_flow=False):
        self.is_flow = is_flow

    def __call__(self, img_group, is_flow=False):
        v = random.random()
        if v < 0.5:
            ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group]
            if self.is_flow:
                # Flipping horizontal-flow frames also inverts their sign.
                for i in range(0, len(ret), 2):
                    ret[i] = ImageOps.invert(ret[i])
            return ret
        else:
            return img_group


class C3D(nn.Module):
    """C3D video network with a 174-way classification head (fc8_new)."""

    def __init__(self):
        super(C3D, self).__init__()
        self.modality = 'RGB'
        self.input_size = 112
        self.input_mean = [104, 117, 128]
        self.input_std = [1]
        self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
        self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2),
            padding=(0, 1, 1))
        self.fc6 = nn.Linear(8192, 4096)
        self.fc7 = nn.Linear(4096, 4096)
        self.fc8_new = nn.Linear(4096, 174)
        self.dropout = nn.Dropout(p=0.5)
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax()

    def forward(self, x):
        # Collapse any leading segment dimension into the canonical clip shape.
        x = x.view(-1, 3, 16, 112, 112)
        h = self.relu(self.conv1(x))
        h = self.pool1(h)
        h = self.relu(self.conv2(h))
        h = self.pool2(h)
        h = self.relu(self.conv3a(h))
        h = self.relu(self.conv3b(h))
        h = self.pool3(h)
        h = self.relu(self.conv4a(h))
        h = self.relu(self.conv4b(h))
        h = self.pool4(h)
        h = self.relu(self.conv5a(h))
        h = self.relu(self.conv5b(h))
        h = self.pool5(h)
        h = h.view(-1, 8192)
        h = self.relu(self.fc6(h))
        h = self.dropout(h)
        h = self.relu(self.fc7(h))
        h = self.dropout(h)
        logits = self.fc8_new(h)
        return logits

    def partialBN(self, enable):
        pass

    @property
    def crop_size(self):
        return self.input_size

    @property
    def scale_size(self):
        return self.input_size * 128 // 112

    def get_augmentation(self):
        if self.modality == 'RGB':
            return torchvision.transforms.Compose([GroupMultiScaleCrop(self.
                input_size, [1, 0.875, 0.75, 0.66]),
                GroupRandomHorizontalFlip(is_flow=False)])


def get_inputs():
    return [torch.rand([4, 3, 16, 112, 112])]


def get_init_inputs():
    return [[], {}]
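# Editor's note (hypothetical smoke test, not part of the original file): with the
# shape produced by get_inputs(), pool5's (0, 1, 1) padding leaves a 512 x 1 x 4 x 4
# = 8192 feature vector per clip, which is exactly what the view(-1, 8192) before
# fc6 expects.
if __name__ == '__main__':
    model = C3D().eval()
    clip, = get_inputs()
    with torch.no_grad():
        logits = model(clip)
    print(logits.shape)  # expected: torch.Size([4, 174])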
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import random import torchvision import torch.nn.parallel import torch.optim from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 200704 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 50176 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 6272 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 784 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 98 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4096 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, 
primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 ) = args args.clear() assert_size_stride(primals_1, (4, 3, 16, 112, 112), (602112, 200704, 12544, 112, 1)) assert_size_stride(primals_2, (64, 3, 3, 3, 3), (81, 27, 9, 3, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (128, 64, 3, 3, 3), (1728, 27, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 3, 3, 3), (3456, 27, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256, 3, 3, 3), (6912, 27, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (512, 256, 3, 3, 3), (6912, 27, 9, 3, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1)) assert_size_stride(primals_13, (512,), (1,)) assert_size_stride(primals_14, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (512, 512, 3, 3, 3), (13824, 27, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (4096, 8192), (8192, 1)) assert_size_stride(primals_19, (4096,), (1,)) assert_size_stride(primals_20, (4096, 4096), (4096, 1)) assert_size_stride(primals_21, (4096,), (1,)) assert_size_stride(primals_22, (174, 4096), (4096, 1)) assert_size_stride(primals_23, (174,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 16, 112, 112), (12845056, 200704, 12544, 112, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(51380224)](buf1, primals_3, 51380224, XBLOCK=512, num_warps=8, num_stages=1) del primals_3 buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [1, 2, 2], [1, 2, 2]) buf3 = buf2[0] buf4 = buf2[1] del buf2 buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 16, 56, 56), (6422528, 50176, 3136, 56, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_1[grid(25690112)](buf6, primals_5, 25690112, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [2, 2, 2], [2, 2, 2]) buf8 = buf7[0] buf9 = buf7[1] del buf7 buf10 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1, 1 ), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 8, 28, 28), (1605632, 6272, 784, 28, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_2[grid(6422528)](buf11, primals_7, 6422528, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 8, 28, 28), (1605632, 6272, 784, 28, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_2[grid(6422528)](buf13, primals_9, 6422528, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf14 = 
torch.ops.aten.max_pool3d_with_indices.default(buf13, [2, 2, 2], [2, 2, 2]) buf15 = buf14[0] buf16 = buf14[1] del buf14 buf17 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 512, 4, 14, 14), (401408, 784, 196, 14, 1)) buf18 = buf17 del buf17 triton_poi_fused_convolution_relu_3[grid(1605632)](buf18, primals_11, 1605632, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf19 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 512, 4, 14, 14), (401408, 784, 196, 14, 1)) buf20 = buf19 del buf19 triton_poi_fused_convolution_relu_3[grid(1605632)](buf20, primals_13, 1605632, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf21 = torch.ops.aten.max_pool3d_with_indices.default(buf20, [2, 2, 2], [2, 2, 2]) buf22 = buf21[0] buf23 = buf21[1] del buf21 buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 2, 7, 7), (50176, 98, 49, 7, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_4[grid(200704)](buf25, primals_15, 200704, XBLOCK=512, num_warps=8, num_stages=1) del primals_15 buf26 = extern_kernels.convolution(buf25, primals_16, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 512, 2, 7, 7), (50176, 98, 49, 7, 1)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_4[grid(200704)](buf27, primals_17, 200704, XBLOCK=512, num_warps=8, num_stages=1) del primals_17 buf28 = torch.ops.aten.max_pool3d_with_indices.default(buf27, [2, 2, 2], [2, 2, 2], [0, 1, 1]) buf29 = buf28[0] buf30 = buf28[1] del buf28 buf31 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf29, (4, 8192), (8192, 1), 0), reinterpret_tensor(primals_18, (8192, 4096), (1, 8192), 0), out=buf31) buf32 = buf31 del buf31 triton_poi_fused_relu_5[grid(16384)](buf32, primals_19, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf33 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32) extern_kernels.mm(buf32, reinterpret_tensor(primals_20, (4096, 4096), (1, 4096), 0), out=buf33) buf34 = buf33 del buf33 triton_poi_fused_relu_5[grid(16384)](buf34, primals_21, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_21 buf35 = empty_strided_cuda((4, 174), (174, 1), torch.float32) extern_kernels.addmm(primals_23, buf34, reinterpret_tensor(primals_22, (4096, 174), (1, 4096), 0), alpha=1, beta=1, out=buf35) del primals_23 return (buf35, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_1, buf1, buf3, buf4, buf6, buf8, buf9, buf11, buf13, buf15, buf16, buf18, buf20, buf22, buf23, buf25, buf27, buf30, reinterpret_tensor(buf29, (4, 8192), (8192, 1), 0), buf32, buf34, primals_22, primals_20, primals_18) from PIL import Image, ImageOps class GroupMultiScaleCrop(object): def __init__(self, input_size, scales=None, max_distort=1, fix_crop=True, more_fix_crop=True): self.scales = scales if scales is not None else [1, 0.875, 0.75, 0.66] self.max_distort = max_distort self.fix_crop = fix_crop self.more_fix_crop = more_fix_crop self.input_size = input_size if not isinstance(input_size, int) else [
input_size, input_size] self.interpolation = Image.BILINEAR def __call__(self, img_group): im_size = img_group[0].size crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size) crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h)) for img in img_group] ret_img_group = [img.resize((self.input_size[0], self.input_size[1] ), self.interpolation) for img in crop_img_group] return ret_img_group def _sample_crop_size(self, im_size): image_w, image_h = im_size[0], im_size[1] base_size = min(image_w, image_h) crop_sizes = [int(base_size * x) for x in self.scales] crop_h = [(self.input_size[1] if abs(x - self.input_size[1]) < 3 else x) for x in crop_sizes] crop_w = [(self.input_size[0] if abs(x - self.input_size[0]) < 3 else x) for x in crop_sizes] pairs = [] for i, h in enumerate(crop_h): for j, w in enumerate(crop_w): if abs(i - j) <= self.max_distort: pairs.append((w, h)) crop_pair = random.choice(pairs) if not self.fix_crop: w_offset = random.randint(0, image_w - crop_pair[0]) h_offset = random.randint(0, image_h - crop_pair[1]) else: w_offset, h_offset = self._sample_fix_offset(image_w, image_h, crop_pair[0], crop_pair[1]) return crop_pair[0], crop_pair[1], w_offset, h_offset def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h): offsets = self.fill_fix_offset(self.more_fix_crop, image_w, image_h, crop_w, crop_h) return random.choice(offsets) @staticmethod def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h): w_step = (image_w - crop_w) // 4 h_step = (image_h - crop_h) // 4 ret = list() ret.append((0, 0)) ret.append((4 * w_step, 0)) ret.append((0, 4 * h_step)) ret.append((4 * w_step, 4 * h_step)) ret.append((2 * w_step, 2 * h_step)) if more_fix_crop: ret.append((0, 2 * h_step)) ret.append((4 * w_step, 2 * h_step)) ret.append((2 * w_step, 4 * h_step)) ret.append((2 * w_step, 0 * h_step)) ret.append((1 * w_step, 1 * h_step)) ret.append((3 * w_step, 1 * h_step)) ret.append((1 * w_step, 3 * h_step)) ret.append((3 * w_step, 3 * h_step)) return ret class GroupRandomHorizontalFlip(object): """Randomly horizontally flips the given PIL.Image with a probability of 0.5 """ def __init__(self, is_flow=False): self.is_flow = is_flow def __call__(self, img_group, is_flow=False): v = random.random() if v < 0.5: ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group] if self.is_flow: for i in range(0, len(ret), 2): ret[i] = ImageOps.invert(ret[i]) return ret else: return img_group class C3DNew(nn.Module): def __init__(self): super(C3DNew, self).__init__() self.modality = 'RGB' self.input_size = 112 self.input_mean = [104, 117, 128] self.input_std = [1] self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)) self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.pool5 = 
nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1)) self.fc6 = nn.Linear(8192, 4096) self.fc7 = nn.Linear(4096, 4096) self.fc8_new = nn.Linear(4096, 174) self.dropout = nn.Dropout(p=0.5) self.relu = nn.ReLU() self.softmax = nn.Softmax() def partialBN(self, enable): pass @property def crop_size(self): return self.input_size @property def scale_size(self): return self.input_size * 128 // 112 def get_augmentation(self): if self.modality == 'RGB': return torchvision.transforms.Compose([GroupMultiScaleCrop(self .input_size, [1, 0.875, 0.75, 0.66]), GroupRandomHorizontalFlip(is_flow=False)]) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3a.weight primals_7 = self.conv3a.bias primals_8 = self.conv3b.weight primals_9 = self.conv3b.bias primals_10 = self.conv4a.weight primals_11 = self.conv4a.bias primals_12 = self.conv4b.weight primals_13 = self.conv4b.bias primals_14 = self.conv5a.weight primals_15 = self.conv5a.bias primals_16 = self.conv5b.weight primals_17 = self.conv5b.bias primals_18 = self.fc6.weight primals_19 = self.fc6.bias primals_20 = self.fc7.weight primals_21 = self.fc7.bias primals_22 = self.fc8_new.weight primals_23 = self.fc8_new.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23]) return output[0]
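# Editor's note (hypothetical equivalence check, assuming a CUDA device and that the
# eager C3D above and this compiled C3DNew are importable side by side): C3DNew runs
# the same weights through the compiled call() graph. Dropout is absent from the
# inference graph, so compare against the eager model in eval() mode.
if torch.cuda.is_available():
    eager = C3D().cuda().eval()
    compiled = C3DNew().cuda().eval()
    compiled.load_state_dict(eager.state_dict())  # identical submodule names
    x = torch.rand(4, 3, 16, 112, 112, device='cuda')
    with torch.no_grad():
        assert torch.allclose(eager(x), compiled(x), atol=1e-4)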
coderSkyChen/Action_Recognition_Zoo
C3D
false
15235
[ "MIT" ]
240
92ec5ec3efeee852aec5c057798298cd3a8e58ae
https://github.com/coderSkyChen/Action_Recognition_Zoo/tree/92ec5ec3efeee852aec5c057798298cd3a8e58ae
DeepCritic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat] # Source node to ATen node mapping: # xu => cat # Graph fragment: # %cat : [num_users=5] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6v/c6v2uta54pkhneoqskozziorif2evnswfmlfn2njbf5xwordyymp.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %cat], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 40 x1 = (xindex // 40) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((32*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 40, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((8*x1) + ((-32) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/j3/cj3skkmeof32bawbrph6liql2ib2zomhrw6fu7ygx6aiswlahhrr.py # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_6 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_10), 
kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3m/c3mnc3w2fowp5vtbgub6g2jkwsfwatrw4z7fr6bbwbrq7ztl7slt.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_4 => relu_2 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_8), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (32, 8), (8, 1)) assert_size_stride(primals_4, (32, ), (1, )) assert_size_stride(primals_5, (32, 40), (40, 1)) assert_size_stride(primals_6, (32, ), (1, )) assert_size_stride(primals_7, (32, 40), (40, 1)) assert_size_stride(primals_8, (32, ), (1, )) assert_size_stride(primals_9, (32, 40), (40, 1)) assert_size_stride(primals_10, (32, ), (1, )) assert_size_stride(primals_11, (1, 32), (32, 1)) assert_size_stride(primals_12, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 32), (1, 8), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 40), (40, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf1, primals_4, buf0, buf2, 160, grid=grid(160), stream=stream0) buf3 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (40, 32), (1, 40), 0), out=buf3) buf4 = empty_strided_cuda((4, 40), (40, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf3, primals_6, buf0, buf4, 160, grid=grid(160), stream=stream0) buf5 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (40, 32), (1, 40), 0), out=buf5) buf6 = empty_strided_cuda((4, 40), (40, 1), torch.float32) # Topologically Sorted Source Nodes: 
[x_5], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf5, primals_8, buf0, buf6, 160, grid=grid(160), stream=stream0) buf7 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf6, reinterpret_tensor(primals_9, (40, 32), (1, 40), 0), out=buf7) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf8, primals_10, 128, grid=grid(128), stream=stream0) del primals_10 buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_12, buf8, reinterpret_tensor(primals_11, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf10) del primals_12 buf11 = empty_strided_cuda((4, 32), (32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf5, primals_8, buf11, 128, grid=grid(128), stream=stream0) del buf5 del primals_8 buf12 = empty_strided_cuda((4, 32), (32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf3, primals_6, buf12, 128, grid=grid(128), stream=stream0) del buf3 del primals_6 buf13 = empty_strided_cuda((4, 32), (32, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf1, primals_4, buf13, 128, grid=grid(128), stream=stream0) del buf1 del primals_4 return (buf10, buf0, buf2, buf4, buf6, buf8, primals_11, primals_9, buf11, primals_7, buf12, primals_5, buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((32, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, 40), (40, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, 40), (40, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((32, 40), (40, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


def hidden_init(layer):
    fan_in = layer.weight.data.size()[0]
    lim = 1.0 / np.sqrt(fan_in)
    return -lim, lim


class DeepCritic(nn.Module):
    """Critic (Value) Model."""

    def __init__(self, state_size, action_size, seed, device, hidden_size=32):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            hidden_size (int): Number of nodes in the network layers
        """
        super(DeepCritic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        in_dim = hidden_size + action_size + state_size
        self.fc1 = nn.Linear(state_size + action_size, hidden_size)
        self.fc2 = nn.Linear(in_dim, hidden_size)
        self.fc3 = nn.Linear(in_dim, hidden_size)
        self.fc4 = nn.Linear(in_dim, hidden_size)
        self.fc5 = nn.Linear(hidden_size, 1)
        self.reset_parameters()

    def reset_parameters(self):
        # fc2, fc3 and fc4 all map in_dim -> hidden_size, so reusing
        # hidden_init(self.fc2) yields the same uniform limits for all three.
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc4.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc5.weight.data.uniform_(-0.003, 0.003)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        xu = torch.cat((state, action), dim=1)
        x = F.relu(self.fc1(xu))
        x = torch.cat([x, xu], dim=1)
        x = F.relu(self.fc2(x))
        x = torch.cat([x, xu], dim=1)
        x = F.relu(self.fc3(x))
        x = torch.cat([x, xu], dim=1)
        x = F.relu(self.fc4(x))
        return self.fc5(x)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'action_size': 4, 'seed': 4, 'device': 0}]
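# --- Illustrative sketch (not from the original repo): a minimal CPU smoke
# test for the eager DeepCritic above. It shows the dense-skip pattern the
# compiled kernels implement: every hidden layer re-consumes the raw
# (state, action) pair, so fc2..fc4 take hidden_size + state_size +
# action_size = 40 features and emit hidden_size = 32.
import torch

torch.manual_seed(0)
critic = DeepCritic(state_size=4, action_size=4, seed=4, device='cpu')
state, action = torch.rand(4, 4), torch.rand(4, 4)
q = critic(state, action)
assert q.shape == (4, 1)  # one Q-value per (state, action) pair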
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 40 x1 = xindex // 40 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 40, tl.int64) tmp15 = tl.load(in_ptr2 + (8 * x1 + (-32 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (32, 8), (8, 1)) assert_size_stride(primals_4, (32,), (1,)) assert_size_stride(primals_5, (32, 40), (40, 1)) 
assert_size_stride(primals_6, (32,), (1,)) assert_size_stride(primals_7, (32, 40), (40, 1)) assert_size_stride(primals_8, (32,), (1,)) assert_size_stride(primals_9, (32, 40), (40, 1)) assert_size_stride(primals_10, (32,), (1,)) assert_size_stride(primals_11, (1, 32), (32, 1)) assert_size_stride(primals_12, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 32), (1, 8), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 40), (40, 1), torch.float32) triton_poi_fused_cat_1[grid(160)](buf1, primals_4, buf0, buf2, 160, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (40, 32), (1, 40), 0), out=buf3) buf4 = empty_strided_cuda((4, 40), (40, 1), torch.float32) triton_poi_fused_cat_1[grid(160)](buf3, primals_6, buf0, buf4, 160, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (40, 32), (1, 40), 0), out=buf5) buf6 = empty_strided_cuda((4, 40), (40, 1), torch.float32) triton_poi_fused_cat_1[grid(160)](buf5, primals_8, buf0, buf6, 160, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_9, (40, 32), (1, 40), 0), out=buf7) buf8 = buf7 del buf7 triton_poi_fused_relu_2[grid(128)](buf8, primals_10, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_10 buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_12, buf8, reinterpret_tensor( primals_11, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf10) del primals_12 buf11 = empty_strided_cuda((4, 32), (32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(128)](buf5, primals_8, buf11, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del primals_8 buf12 = empty_strided_cuda((4, 32), (32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(128)](buf3, primals_6, buf12, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_6 buf13 = empty_strided_cuda((4, 32), (32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(128)](buf1, primals_4, buf13, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_4 return (buf10, buf0, buf2, buf4, buf6, buf8, primals_11, primals_9, buf11, primals_7, buf12, primals_5, buf13) def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class DeepCriticNew(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, device, hidden_size=32): """Initialize parameters and build model. 
Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed hidden_size (int): Number of nodes in the network layers """ super(DeepCriticNew, self).__init__() self.seed = torch.manual_seed(seed) self.device = device in_dim = hidden_size + action_size + state_size self.fc1 = nn.Linear(state_size + action_size, hidden_size) self.fc2 = nn.Linear(in_dim, hidden_size) self.fc3 = nn.Linear(in_dim, hidden_size) self.fc4 = nn.Linear(in_dim, hidden_size) self.fc5 = nn.Linear(hidden_size, 1) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(*hidden_init(self.fc2)) self.fc4.weight.data.uniform_(*hidden_init(self.fc2)) self.fc5.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_9 = self.fc4.weight primals_10 = self.fc4.bias primals_11 = self.fc5.weight primals_12 = self.fc5.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
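# --- Illustrative sketch (assumes a CUDA device and that DeepCritic and
# DeepCriticNew from the two files above are importable together): both
# classes seed the global RNG with the same value at construction, so their
# parameters match and the compiled forward should agree with the eager one
# to float32 tolerance.
import torch

if torch.cuda.is_available():
    eager = DeepCritic(4, 4, seed=4, device=0).cuda()
    compiled = DeepCriticNew(4, 4, seed=4, device=0).cuda()
    s, a = torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda')
    torch.testing.assert_close(eager(s, a), compiled(s, a))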
drib861204/Soft-Actor-Critic-and-Extensions
DeepCritic
false
15,236
[ "MIT" ]
143
3075df7430c1c49177b3798d753a9e3f6226672e
https://github.com/drib861204/Soft-Actor-Critic-and-Extensions/tree/3075df7430c1c49177b3798d753a9e3f6226672e
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/b7/cb7yiqdigd2vu5it7f2y6axob3bgvkx2ecs3nmymezsrlxsu2jhl.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (32, 8), (8, 1)) assert_size_stride(primals_4, (32, ), (1, )) assert_size_stride(primals_5, (32, 32), (32, 1)) assert_size_stride(primals_6, (32, ), (1, )) assert_size_stride(primals_7, (1, 32), (32, 1)) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) 
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 32), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 128, grid=grid(128), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (32, 32), (1, 32), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf4, primals_6, 128, grid=grid(128), stream=stream0) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf6) del primals_8 return (buf6, buf0, buf2, buf4, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((32, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, device, hidden_size=32): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed hidden_size (int): Number of nodes in the network layers """ super(Critic, self).__init__() self.seed = torch.manual_seed(seed) self.device = device self.fc1 = nn.Linear(state_size + action_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.fc3 = nn.Linear(hidden_size, 1) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, state, action): """Build a critic (value) network that maps (state, action) pairs -> Q-values.""" x = torch.cat((state, action), dim=1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) return self.fc3(x) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4, 'device': 0}]
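# --- Plain-PyTorch reference (a sketch, not part of the repo) for the two
# fused kernels in this record: triton_poi_fused_cat_0 concatenates the two
# (4, 4) inputs along dim=1, and triton_poi_fused_relu_1 applies a bias-add
# followed by ReLU (in place in the kernel; out-of-place here for clarity).
import torch

def cat_reference(state, action):
    # mirrors triton_poi_fused_cat_0
    return torch.cat((state, action), dim=1)

def relu_bias_reference(x, bias):
    # mirrors triton_poi_fused_relu_1, where the bias broadcasts over rows
    return torch.relu(x + bias)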
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (32, 8), (8, 1)) assert_size_stride(primals_4, (32,), (1,)) assert_size_stride(primals_5, (32, 32), (32, 1)) assert_size_stride(primals_6, (32,), (1,)) assert_size_stride(primals_7, (1, 32), (32, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 32), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(128)](buf2, primals_4, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (32, 32), (1, 32), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(128)](buf4, primals_6, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf6) del primals_8 return buf6, buf0, buf2, buf4, primals_7, primals_5 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class CriticNew(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, device, hidden_size=32): """Initialize parameters and build model. 
Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed hidden_size (int): Number of nodes in the network layers """ super(CriticNew, self).__init__() self.seed = torch.manual_seed(seed) self.device = device self.fc1 = nn.Linear(state_size + action_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.fc3 = nn.Linear(hidden_size, 1) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
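# --- Illustrative sketch (assumes a CUDA device): CriticNew packs its inputs
# and parameters into the fixed positional order that call() guards with
# assert_size_stride, so the compiled wrapper is used exactly like the eager
# Critic.
import torch

if torch.cuda.is_available():
    critic = CriticNew(4, 4, seed=4, device=0).cuda()
    q = critic(torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda'))
    assert q.shape == (4, 1)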
drib861204/Soft-Actor-Critic-and-Extensions
Critic
false
15,237
[ "MIT" ]
143
3075df7430c1c49177b3798d753a9e3f6226672e
https://github.com/drib861204/Soft-Actor-Critic-and-Extensions/tree/3075df7430c1c49177b3798d753a9e3f6226672e
SubNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # y_1 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 return (buf3, reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class SubNet(nn.Module):
    """
    The subnetwork that is used in TFN for video and audio in the pre-fusion stage
    """

    def __init__(self, in_size, hidden_size, n_class, dropout, modal_name='text'):
        """
        Args:
            in_size: input dimension
            hidden_size: hidden layer dimension
            n_class: output (class) dimension
            dropout: dropout probability
        Output:
            (return value in forward) a tuple (y_2, y_3) of shapes
            (batch_size, hidden_size) and (batch_size, n_class)
        """
        super(SubNet, self).__init__()
        self.drop = nn.Dropout(p=dropout)
        self.linear_1 = nn.Linear(in_size, hidden_size)
        self.linear_2 = nn.Linear(hidden_size, hidden_size)
        self.linear_3 = nn.Linear(hidden_size, n_class)

    def forward(self, x):
        """
        Args:
            x: tensor of shape (batch_size, in_size)
        """
        dropped = self.drop(x)
        y_1 = torch.tanh(self.linear_1(dropped))
        y_2 = torch.tanh(self.linear_2(y_1))
        y_3 = self.linear_3(y_2)
        return y_2, y_3


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_size': 4, 'hidden_size': 4, 'n_class': 4, 'dropout': 0.5}]
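# --- Illustrative sketch (inputs follow get_inputs above): SubNet returns
# both the last hidden activation y_2 and the logits y_3; eval() disables
# dropout so the tanh stack is deterministic.
import torch

torch.manual_seed(0)
net = SubNet(in_size=4, hidden_size=4, n_class=4, dropout=0.5).eval()
y_2, y_3 = net(torch.rand(4, 4, 4, 4))
assert y_2.shape == (4, 4, 4, 4) and y_3.shape == (4, 4, 4, 4)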
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf3, reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1, buf3, primals_6, primals_4 class SubNetNew(nn.Module): """ The subnetwork that is used in TFN for video and audio in the pre-fusion stage """ def __init__(self, in_size, hidden_size, n_class, dropout, modal_name= 'text'): """ Args: in_size: input dimension hidden_size: hidden layer dimension dropout: dropout probability Output: (return value in forward) a tensor of shape (batch_size, hidden_size) """ super(SubNetNew, self).__init__() self.drop = nn.Dropout(p=dropout) self.linear_1 = nn.Linear(in_size, hidden_size) self.linear_2 = nn.Linear(hidden_size, hidden_size) self.linear_3 = nn.Linear(hidden_size, n_class) def forward(self, input_0): primals_2 = self.linear_1.weight primals_3 = self.linear_1.bias primals_4 = self.linear_2.weight primals_5 = self.linear_2.bias primals_6 = self.linear_3.weight primals_7 = self.linear_3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
dumpmemory/Multimodal-Infomax
SubNet
false
15,238
[ "MIT" ]
57
9a6dc8f2bfa861cd447ba65c6a037cd7dd24f473
https://github.com/dumpmemory/Multimodal-Infomax/tree/9a6dc8f2bfa861cd447ba65c6a037cd7dd24f473
CondInjection
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xw/cxwg3k3qmxnavr6z7eiak7zhuseu6vhubtxueqvow6moxv7a3sqa.py # Topologically Sorted Source Nodes: [mul, add_1], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add_1 => add_1 # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %normal_functional), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = 
xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0 + (16*x2)), None, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + (x3), tmp5, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (256, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, ), (1, )) buf0 = empty_strided_cuda((256, 1, 4, 4), (16, 16, 4, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [noise], Original ATen: [aten.normal_functional] buf1 = torch.ops.aten.normal_functional.default(buf0) del buf0 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((256, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, add_1], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(primals_1, primals_3, buf2, buf3, 16384, grid=grid(16384), stream=stream0) del primals_1 del primals_3 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class CondInjection(nn.Module):

    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, image, labels, noise=None):
        # `labels` is accepted for interface compatibility but does not
        # contribute to the output; the compiled graph above ignores it
        # as well (primals_2 is unused).
        if noise is None:
            batch, _, height, width = image.shape
            noise = image.new_empty(batch, 1, height, width).normal_()
        return image + self.weight * noise


def get_inputs():
    return [torch.rand([256, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
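# --- Illustrative sketch (not from the repo): the learned scalar weight
# scales a single-channel noise map that broadcasts across the image
# channels, which is exactly the x0 + 16 * x2 indexing used by
# triton_poi_fused_add_mul_0 above.
import torch

image = torch.rand(256, 4, 4, 4)
noise = torch.randn(256, 1, 4, 4)  # one noise channel per sample
weight = torch.zeros(1)            # matches the module's initial parameter
out = image + weight * noise       # broadcasts over the 4 channels
assert out.shape == image.shape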
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last' ) tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + x3, tmp5, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (256, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1,), (1,)) buf0 = empty_strided_cuda((256, 1, 4, 4), (16, 16, 4, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = torch.ops.aten.normal_functional.default(buf0) del buf0 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((256, 4, 4, 4), (64, 16, 4, 1), torch.float32 ) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(16384)](primals_1, primals_3, buf2, buf3, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf3, buf2 class CondInjectionNew(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, input_0, input_1): primals_3 = self.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
dubtor/EditGAN-Robert
CondInjection
false
15,239
[ "BSD-2-Clause" ]
110
8e6d80e7647c3536827f11cf0a9abf51c42794b2
https://github.com/dubtor/EditGAN-Robert/tree/8e6d80e7647c3536827f11cf0a9abf51c42794b2
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ja/cjaqivxqt47u6u4ytdtwy4anlve2a555h24d2t23cwekkpahiofd.py # Topologically Sorted Source Nodes: [predict, mul, intersect, sum_1, mul_1, add_1, unionset, sum_2, truediv, loss], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.sum, aten.div, aten.rsub] # Source node to ATen node mapping: # add_1 => add_1 # intersect => add # loss => sub # mul => mul # mul_1 => mul_1 # predict => sigmoid # sum_1 => sum_1 # sum_2 => sum_2 # truediv => div # unionset => add_2 # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %arg1_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-09), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, %arg1_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-09), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %sum_2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) triton_per_fused_add_div_mul_rsub_sigmoid_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sigmoid_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 
'*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sigmoid_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = 1e-09 tmp5 = tmp3 + tmp4 tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tmp1 + tmp2 tmp10 = tmp9 + tmp4 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 2.0 tmp15 = tmp8 * tmp14 tmp16 = tmp15 / tmp13 tmp17 = 1.0 tmp18 = tmp17 - tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [predict, mul, intersect, sum_1, mul_1, add_1, unionset, sum_2, truediv, loss], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.sum, aten.div, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sigmoid_sum_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class DiceLoss(nn.Module):

    def __init__(self, epsilon=1e-09):
        """Dice-Loss, suited to data-imbalanced tasks, though it can be hard to converge and somewhat unstable.
        paper: Dice Loss for Data-imbalanced NLP Tasks
        url: https://arxiv.org/pdf/1911.02855.pdf
        args:
            epsilon: float, small smoothing constant. eg. 1e-9
        returns:
            Tensor of loss.
        examples:
            >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
            >>> label, logits = torch.tensor(label).long(), torch.tensor(logits).float()
            >>> loss = DiceLoss()(logits, label)
        """
        super(DiceLoss, self).__init__()
        self.epsilon = epsilon

    def forward(self, logits, labels):
        predict = torch.sigmoid(logits)
        intersect = predict * labels + self.epsilon
        unionset = predict + labels + self.epsilon
        loss = 1 - 2 * intersect.sum() / unionset.sum()
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
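# --- Worked example (a sketch based on the docstring): the loss is
# 1 - 2*sum(P*L + eps) / sum(P + L + eps) with P = sigmoid(logits);
# epsilon is added elementwise inside both sums for numerical stability.
import torch

logits = torch.tensor([[0.0, 1.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0]])
labels = torch.tensor([[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0]])
loss = DiceLoss()(logits, labels)
p = torch.sigmoid(logits)
manual = 1 - 2 * (p * labels + 1e-09).sum() / (p + labels + 1e-09).sum()
assert torch.isclose(loss, manual)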
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_mul_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp2 = tl.load(in_ptr1 + r0, None)
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tmp4 = 1e-09
    tmp5 = tmp3 + tmp4
    tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
    tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
    tmp9 = tmp1 + tmp2
    tmp10 = tmp9 + tmp4
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 2.0
    tmp15 = tmp8 * tmp14
    tmp16 = tmp15 / tmp13
    tmp17 = 1.0
    tmp18 = tmp17 - tmp16
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_mul_rsub_sigmoid_sum_0[grid(1)](buf2,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,


class DiceLossNew(nn.Module):

    def __init__(self, epsilon=1e-09):
        """Dice-Loss, suited to data-imbalanced tasks, though it can be hard to converge and somewhat unstable.
        paper: Dice Loss for Data-imbalanced NLP Tasks
        url: https://arxiv.org/pdf/1911.02855.pdf
        args:
            epsilon: float, small smoothing constant. eg. 1e-9
        returns:
            Tensor of loss.
        examples:
            >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
            >>> label, logits = torch.tensor(label).long(), torch.tensor(logits).float()
            >>> loss = DiceLoss()(logits, label)
        """
        super(DiceLossNew, self).__init__()
        self.epsilon = epsilon

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
dumpmemory/Pytorch-NLU
DiceLoss
false
15240
[ "Apache-2.0" ]
115
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
LayerNormLSTMCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7e/c7edgnsiuilw7uzwau7radvkvvtmowm7d7uh56mczbhieiykfrnx.py # Topologically Sorted Source Nodes: [hx], Original ATen: [aten.new_zeros] # Source node to ATen node mapping: # hx => full_default # Graph fragment: # %full_default : [num_users=3] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_zeros_0 = async_compile.triton('triton_poi_fused_new_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/2f/c2fkx6tbj26h4otrn2qrjtiwj3hyc22tyc25wwddmjpxxckktv6j.py # Topologically Sorted Source Nodes: [layer_norm, layer_norm_1, gates], Original ATen: [aten.native_layer_norm, aten.add] # Source node to ATen node mapping: # gates => add_4 # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # layer_norm_1 => add_2, add_3, mul_2, mul_3, rsqrt_1, sub_1, var_mean_1 # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%addmm, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%addmm_1, [1]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %getitem_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_8), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_9), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %add_3), kwargs = {}) triton_per_fused_add_native_layer_norm_1 = async_compile.triton('triton_per_fused_add_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32', 12: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_native_layer_norm_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 8, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_native_layer_norm_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp42 = tl.load(in_ptr2 + (r1), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr3 + (r1), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr4 + (r1), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr5 + (r1), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.where(xmask, tmp23, 0) tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tmp29 / tmp9 tmp31 = tmp23 - tmp30 tmp32 = tmp31 * tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp37 = tmp36 / tmp17 tmp38 = tmp37 + tmp19 tmp39 = libdevice.rsqrt(tmp38) tmp40 = tmp0 - tmp10 tmp41 = tmp40 * tmp21 tmp43 = tmp41 * tmp42 tmp45 = tmp43 + tmp44 tmp46 = tmp22 - tmp30 tmp47 = tmp46 * tmp39 tmp49 = tmp47 * tmp48 tmp51 = tmp49 + tmp50 tmp52 = tmp45 + tmp51 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp39, xmask) tl.store(out_ptr2 + (r1 + (16*x0)), tmp52, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) tl.store(out_ptr1 + (x0), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ms/cms6ctu6l3gsya7bcfoi6ljxkz24qfnsh4dphpozj76xxmuypt3i.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%slice_2,), kwargs = {}) triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + (x2), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3p/c3pqbidnyc7ibhtbltkpvphbztgjerdojzgow6gd25qnvcpr6w7i.py # Topologically Sorted Source Nodes: [g, mul, mul_1, cy], Original ATen: [aten.tanh, aten.mul, aten.add] # Source node to ATen node mapping: # cy => add_5 # g => tanh # mul => mul_4 # mul_1 => mul_5 # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%slice_4,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_5, %full_default), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_4, %tanh), kwargs = {}) # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) triton_poi_fused_add_mul_tanh_3 = async_compile.triton('triton_poi_fused_add_mul_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask) tmp4 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask) tmp1 = 0.0 tmp2 = tmp0 * tmp1 tmp5 = libdevice.tanh(tmp4) tmp6 = tmp3 * tmp5 tmp7 = tmp2 + tmp6 tl.store(out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vp/cvpke7vqb5rcrpra7r6jl5ewicc4xljasp4hd4x25ttzukjdcria.py # Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_2 => add_6, rsqrt_2, var_mean_2 # Graph fragment: # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [1]), kwargs = {correction: 0, keepdim: True}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_7, 1e-05), kwargs = {}) # %rsqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) triton_poi_fused_native_layer_norm_4 = async_compile.triton('triton_poi_fused_native_layer_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * 
tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ub/cubfrt3yztsismm4xpw5raox6rdfhcpimoypwjc2xctqj25kkzdc.py # Topologically Sorted Source Nodes: [layer_norm_2, tanh_1, hy], Original ATen: [aten.native_layer_norm, aten.tanh, aten.mul] # Source node to ATen node mapping: # hy => mul_8 # layer_norm_2 => add_6, add_7, mul_6, mul_7, rsqrt_2, sub_2, var_mean_2 # tanh_1 => tanh_1 # Graph fragment: # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [1]), kwargs = {correction: 0, keepdim: True}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_7, 1e-05), kwargs = {}) # %rsqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %getitem_8), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_2), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %primals_10), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %primals_11), kwargs = {}) # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_7,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_6, %tanh_1), kwargs = {}) triton_poi_fused_mul_native_layer_norm_tanh_5 = async_compile.triton('triton_poi_fused_mul_native_layer_norm_tanh_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_native_layer_norm_tanh_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_native_layer_norm_tanh_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, 
in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = tmp3 * tmp4 tmp7 = tmp5 * tmp6 tmp9 = tmp7 + tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = tmp0 * tmp10 tl.store(out_ptr0 + (x2), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (16, ), (1, )) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16, ), (1, )) assert_size_stride(primals_8, (16, ), (1, )) assert_size_stride(primals_9, (16, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hx], Original ATen: [aten.new_zeros] stream0 = get_raw_stream(0) triton_poi_fused_new_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_6 del primals_7 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0); del buf3 # reuse buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf8 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf10 = reinterpret_tensor(buf8, (4, 1), (1, 1), 0); del buf8 # reuse buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm, layer_norm_1, gates], Original ATen: [aten.native_layer_norm, aten.add] triton_per_fused_add_native_layer_norm_1.run(buf5, buf10, buf1, buf6, primals_4, primals_5, primals_8, primals_9, buf2, buf7, buf11, 4, 16, grid=grid(4), stream=stream0) buf12 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_2.run(buf11, buf12, 48, grid=grid(48), stream=stream0) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [g, mul, mul_1, cy], Original ATen: [aten.tanh, aten.mul, aten.add] triton_poi_fused_add_mul_tanh_3.run(buf12, buf11, buf13, 16, grid=grid(16), stream=stream0) del buf11 buf14 = 
empty_strided_cuda((4, 1), (1, 4), torch.float32) buf15 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_4.run(buf13, buf14, buf15, 4, grid=grid(4), stream=stream0) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm_2, tanh_1, hy], Original ATen: [aten.native_layer_norm, aten.tanh, aten.mul] triton_poi_fused_mul_native_layer_norm_tanh_5.run(buf12, buf13, buf14, buf15, primals_10, primals_11, buf16, 16, grid=grid(16), stream=stream0) del buf14 del buf15 return (buf16, buf13, primals_1, primals_4, primals_5, primals_8, primals_9, primals_10, primals_11, buf0, buf1, buf2, buf5, buf6, buf7, buf10, reinterpret_tensor(buf12, (4, 4), (12, 1), 0), reinterpret_tensor(buf12, (4, 4), (12, 1), 8), buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data


class LayerNormLSTMCell(nn.LSTMCell):

    def __init__(self, input_size, hidden_size, bias=True):
        super().__init__(input_size, hidden_size, bias)
        self.ln_ih = nn.LayerNorm(4 * hidden_size)
        self.ln_hh = nn.LayerNorm(4 * hidden_size)
        self.ln_ho = nn.LayerNorm(hidden_size)

    def forward(self, input, hidden=None):
        if hidden is None:
            hx = input.new_zeros(input.size(0), self.hidden_size,
                requires_grad=False)
            cx = input.new_zeros(input.size(0), self.hidden_size,
                requires_grad=False)
        else:
            hx, cx = hidden
        gates = self.ln_ih(F.linear(input, self.weight_ih, self.bias_ih)
            ) + self.ln_hh(F.linear(hx, self.weight_hh, self.bias_hh))
        i, f, o = gates[:, :3 * self.hidden_size].sigmoid().chunk(3, 1)
        g = gates[:, 3 * self.hidden_size:].tanh()
        cy = f * cx + i * g
        hy = o * self.ln_ho(cy).tanh()
        return hy, cy


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
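Restated compactly (H = hidden_size), the cell computes the update sketched below; note that this module splits the normalized gates as i, f, o, g, whereas nn.LSTMCell's weight layout is conventionally read as i, f, g, o. A small shape-check sketch, assuming the class above is in scope:

# gates = LN_ih(x @ W_ih^T + b_ih) + LN_hh(h @ W_hh^T + b_hh)   # (B, 4H)
# i, f, o = sigmoid(gates[:, :3H]).chunk(3, dim=1)              # input/forget/output gates
# g       = tanh(gates[:, 3H:])                                 # candidate cell state
# c' = f * c + i * g
# h' = o * tanh(LN_ho(c'))
import torch

cell = LayerNormLSTMCell(input_size=4, hidden_size=4)
x = torch.rand(4, 4)      # (batch, input_size)
hy, cy = cell(x)          # hidden=None, so hx and cx start as zeros
assert hy.shape == (4, 4) and cy.shape == (4, 4)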
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_per_fused_add_native_layer_norm_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp42 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr4 + r1, None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr5 + r1, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tl.where(xmask, tmp23, 0) tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tmp29 / tmp9 tmp31 = tmp23 - tmp30 tmp32 = tmp31 * tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp37 = tmp36 / tmp17 tmp38 = tmp37 + tmp19 tmp39 = libdevice.rsqrt(tmp38) tmp40 = tmp0 - tmp10 tmp41 = tmp40 * tmp21 tmp43 = tmp41 * tmp42 tmp45 = tmp43 + tmp44 tmp46 = tmp22 - tmp30 tmp47 = tmp46 * tmp39 tmp49 = tmp47 * tmp48 tmp51 = tmp49 + tmp50 tmp52 = tmp45 + tmp51 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp39, xmask) tl.store(out_ptr2 + (r1 + 16 * x0), tmp52, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp30, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + x2, tmp1, xmask) @triton.jit def triton_poi_fused_add_mul_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask) tmp3 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask) tmp4 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp1 = 0.0 tmp2 = tmp0 * tmp1 tmp5 = libdevice.tanh(tmp4) tmp6 = tmp3 * tmp5 tmp7 = tmp2 + tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_mul_native_layer_norm_tanh_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = tmp3 * tmp4 tmp7 = tmp5 * tmp6 tmp9 = tmp7 + tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = tmp0 * tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (16,), (1,)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor( primals_2, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), alpha=1, beta=1, 
out=buf6) del primals_6 del primals_7 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0) del buf3 buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf8 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf10 = reinterpret_tensor(buf8, (4, 1), (1, 1), 0) del buf8 buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_per_fused_add_native_layer_norm_1[grid(4)](buf5, buf10, buf1, buf6, primals_4, primals_5, primals_8, primals_9, buf2, buf7, buf11, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf12 = empty_strided_cuda((4, 12), (12, 1), torch.float32) triton_poi_fused_sigmoid_2[grid(48)](buf11, buf12, 48, XBLOCK=64, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_tanh_3[grid(16)](buf12, buf11, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf11 buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf15 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused_native_layer_norm_4[grid(4)](buf13, buf14, buf15, 4, XBLOCK=4, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_native_layer_norm_tanh_5[grid(16)](buf12, buf13, buf14, buf15, primals_10, primals_11, buf16, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf14 del buf15 return (buf16, buf13, primals_1, primals_4, primals_5, primals_8, primals_9, primals_10, primals_11, buf0, buf1, buf2, buf5, buf6, buf7, buf10, reinterpret_tensor(buf12, (4, 4), (12, 1), 0), reinterpret_tensor(buf12, (4, 4), (12, 1), 8), buf13) class LayerNormLSTMCellNew(nn.LSTMCell): def __init__(self, input_size, hidden_size, bias=True): super().__init__(input_size, hidden_size, bias) self.ln_ih = nn.LayerNorm(4 * hidden_size) self.ln_hh = nn.LayerNorm(4 * hidden_size) self.ln_ho = nn.LayerNorm(hidden_size) def forward(self, input_0): primals_2 = self.weight_ih primals_6 = self.weight_hh primals_3 = self.bias_ih primals_4 = self.bias_hh primals_5 = self.ln_ih.weight primals_7 = self.ln_ih.bias primals_8 = self.ln_hh.weight primals_9 = self.ln_hh.bias primals_10 = self.ln_ho.weight primals_11 = self.ln_ho.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1]
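The compiled module above covers only the hidden=None trace: triton_poi_fused_new_zeros_0 materializes the zero initial state, and the f * cx term is folded into a multiply by the literal 0.0 inside triton_poi_fused_add_mul_tanh_3. A calling sketch, assuming a CUDA device and the class above in scope:

import torch

if torch.cuda.is_available():
    cell = LayerNormLSTMCellNew(input_size=4, hidden_size=4).cuda()
    hy, cy = cell(torch.rand(4, 4, device='cuda'))  # no (hx, cx) argument
    # with identical weights, (hy, cy) should match LayerNormLSTMCell on the same input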
drgripa1/deepvecfont
LayerNormLSTMCell
false
15241
[ "MIT" ]
68
a44d81ba19a22e43b4e576cd8ebc5c2fd961a621
https://github.com/drgripa1/deepvecfont/tree/a44d81ba19a22e43b4e576cd8ebc5c2fd961a621
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/mb/cmbmpgnsdmf6qxq2jx4p6hbawha36o5xu3suwn2octgeojszd6vz.py # Topologically Sorted Source Nodes: [mul, ge_1, softplus_2, softplus_3, sub_1, log_1_probs, mul_1, sub_2, mul_2, ge, neg_1, softplus, add, softplus_1, neg_2, log_0_probs, mul_3, loss, probs, sub, abs_1, pow_1, coeff, loss_1, loss_2], Original ATen: [aten.mul, aten.ge, aten.softplus, aten.sub, aten.where, aten.rsub, aten.neg, aten.add, aten.sigmoid, aten.abs, aten.pow, aten.mean] # Source node to ATen node mapping: # abs_1 => abs_1 # add => add # coeff => neg # ge => ge # ge_1 => ge_1 # log_0_probs => where_2 # log_1_probs => where_5 # loss => add_1 # loss_1 => mul_8 # loss_2 => mean # mul => mul_4 # mul_1 => mul_5 # mul_2 => mul_6 # mul_3 => mul_7 # neg_1 => neg_1 # neg_2 => neg_2 # pow_1 => pow_1 # probs => sigmoid # softplus => div, exp, gt, log1p, mul, where # softplus_1 => div_1, exp_1, gt_1, log1p_1, mul_1, where_1 # softplus_2 => div_2, exp_2, gt_2, log1p_2, mul_2, where_3 # softplus_3 => div_3, exp_3, gt_3, log1p_3, mul_3, where_4 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.5), kwargs = {}) # %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%arg0_1, 0), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, -1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul_2, 50), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_2,), kwargs = {}) # %log1p_2 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p_2, -1), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %arg0_1, %div_2), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul_3, 50), kwargs = {}) # %exp_3 : [num_users=1] = 
call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {}) # %log1p_3 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_3,), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p_3, 1), kwargs = {}) # %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %arg0_1, %div_3), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %where_4), kwargs = {}) # %where_5 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ge_1, %where_3, %sub_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %where_5), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 0.5), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%arg0_1, 0), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, -1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 50), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, -1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %div), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_1, %where), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul_1, 50), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_1,), kwargs = {}) # %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p_1, 1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %arg0_1, %div_1), kwargs = {}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%where_1,), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ge, %add, %neg_2), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %where_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %mul_7), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %sigmoid), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %neg : 
[num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_1,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %neg), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_8,), kwargs = {}) triton_per_fused_abs_add_ge_mean_mul_neg_pow_rsub_sigmoid_softplus_sub_where_0 = async_compile.triton('triton_per_fused_abs_add_ge_mean_mul_neg_pow_rsub_sigmoid_softplus_sub_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_ge_mean_mul_neg_pow_rsub_sigmoid_softplus_sub_where_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_ge_mean_mul_neg_pow_rsub_sigmoid_softplus_sub_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 0.0 tmp5 = tmp3 >= tmp4 tmp6 = -1.0 tmp7 = tmp3 * tmp6 tmp8 = 50.0 tmp9 = tmp7 > tmp8 tmp10 = tl_math.exp(tmp7) tmp11 = libdevice.log1p(tmp10) tmp12 = tmp11 * tmp6 tmp13 = tl.where(tmp9, tmp3, tmp12) tmp14 = 1.0 tmp15 = tmp3 * tmp14 tmp16 = tmp15 > tmp8 tmp17 = tl_math.exp(tmp15) tmp18 = libdevice.log1p(tmp17) tmp19 = tmp18 * tmp14 tmp20 = tl.where(tmp16, tmp3, tmp19) tmp21 = tmp3 - tmp20 tmp22 = tl.where(tmp5, tmp13, tmp21) tmp23 = tmp2 * tmp22 tmp24 = tmp14 - tmp0 tmp25 = tmp24 * tmp1 tmp26 = -tmp3 tmp27 = tmp26 + tmp13 tmp28 = -tmp20 tmp29 = tl.where(tmp5, tmp27, tmp28) tmp30 = tmp25 * tmp29 tmp31 = tmp23 + tmp30 tmp32 = tl.sigmoid(tmp3) tmp33 = tmp0 - tmp32 tmp34 = tl_math.abs(tmp33) tmp35 = tmp34 * tmp34 tmp36 = -tmp35 tmp37 = tmp31 * tmp36 tmp38 = tl.broadcast_to(tmp37, [RBLOCK]) tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0)) tmp41 = 256.0 tmp42 = tmp40 / tmp41 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp42, None) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul, ge_1, softplus_2, softplus_3, sub_1, log_1_probs, mul_1, sub_2, mul_2, ge, neg_1, softplus, add, softplus_1, neg_2, log_0_probs, mul_3, loss, probs, sub, abs_1, pow_1, coeff, loss_1, loss_2], Original ATen: [aten.mul, aten.ge, aten.softplus, aten.sub, aten.where, aten.rsub, aten.neg, aten.add, aten.sigmoid, aten.abs, aten.pow, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_abs_add_ge_mean_mul_neg_pow_rsub_sigmoid_softplus_sub_where_0.run(buf2, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class FocalLoss(nn.Module):

    def __init__(self, alpha=0.5, gamma=2, reduction='mean'):
        """FocalLoss (focal loss); when in doubt, alpha == 0.5 may work slightly better.
        url: https://github.com/CoinCheung/pytorch-loss
        Usage is same as nn.BCEWithLogits:
            >>> loss = criteria(logits, lbs)
        """
        super(FocalLoss, self).__init__()
        self.reduction = reduction
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, logits, labels):
        probs = torch.sigmoid(logits)
        coeff = torch.abs(labels - probs).pow(self.gamma).neg()
        log_0_probs = torch.where(logits >= 0, -logits + nn.functional.
            softplus(logits, -1, 50), -nn.functional.softplus(logits, 1, 50))
        log_1_probs = torch.where(logits >= 0, nn.functional.softplus(
            logits, -1, 50), logits - nn.functional.softplus(logits, 1, 50))
        loss = labels * self.alpha * log_1_probs + (1.0 - labels) * (1.0 -
            self.alpha) * log_0_probs
        loss = loss * coeff
        if self.reduction == 'mean':
            loss = loss.mean()
        if self.reduction == 'sum':
            loss = loss.sum()
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
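The two torch.where expressions are numerically stable evaluations of log σ(x) and log(1 − σ(x)): F.softplus(x, -1, 50) equals −log(1 + e^(−x)) = log σ(x), and for x < 0 the identity log σ(x) = x − log(1 + e^x) avoids overflow. A quick check of both identities (an independent sketch, not part of the original class):

import torch
import torch.nn.functional as F

x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
log_p1 = torch.where(x >= 0, F.softplus(x, -1, 50), x - F.softplus(x, 1, 50))
log_p0 = torch.where(x >= 0, -x + F.softplus(x, -1, 50), -F.softplus(x, 1, 50))
assert torch.allclose(log_p1, F.logsigmoid(x))    # log P(y=1)
assert torch.allclose(log_p0, F.logsigmoid(-x))   # log P(y=0)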
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_add_ge_mean_mul_neg_pow_rsub_sigmoid_softplus_sub_where_0(
        in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp4 = 0.0
    tmp5 = tmp3 >= tmp4
    tmp6 = -1.0
    tmp7 = tmp3 * tmp6
    tmp8 = 50.0
    tmp9 = tmp7 > tmp8
    tmp10 = tl_math.exp(tmp7)
    tmp11 = libdevice.log1p(tmp10)
    tmp12 = tmp11 * tmp6
    tmp13 = tl.where(tmp9, tmp3, tmp12)
    tmp14 = 1.0
    tmp15 = tmp3 * tmp14
    tmp16 = tmp15 > tmp8
    tmp17 = tl_math.exp(tmp15)
    tmp18 = libdevice.log1p(tmp17)
    tmp19 = tmp18 * tmp14
    tmp20 = tl.where(tmp16, tmp3, tmp19)
    tmp21 = tmp3 - tmp20
    tmp22 = tl.where(tmp5, tmp13, tmp21)
    tmp23 = tmp2 * tmp22
    tmp24 = tmp14 - tmp0
    tmp25 = tmp24 * tmp1
    tmp26 = -tmp3
    tmp27 = tmp26 + tmp13
    tmp28 = -tmp20
    tmp29 = tl.where(tmp5, tmp27, tmp28)
    tmp30 = tmp25 * tmp29
    tmp31 = tmp23 + tmp30
    tmp32 = tl.sigmoid(tmp3)
    tmp33 = tmp0 - tmp32
    tmp34 = tl_math.abs(tmp33)
    tmp35 = tmp34 * tmp34
    tmp36 = -tmp35
    tmp37 = tmp31 * tmp36
    tmp38 = tl.broadcast_to(tmp37, [RBLOCK])
    tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0))
    tmp41 = 256.0
    tmp42 = tmp40 / tmp41
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp42, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_per_fused_abs_add_ge_mean_mul_neg_pow_rsub_sigmoid_softplus_sub_where_0[
            grid(1)](buf2, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,


class FocalLossNew(nn.Module):

    def __init__(self, alpha=0.5, gamma=2, reduction='mean'):
        """FocalLoss (focal loss); when in doubt, alpha == 0.5 may work slightly better.
        url: https://github.com/CoinCheung/pytorch-loss
        Usage is same as nn.BCEWithLogits:
            >>> loss = criteria(logits, lbs)
        """
        super(FocalLossNew, self).__init__()
        self.reduction = reduction
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
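As with the Dice kernel, the fused focal-loss kernel bakes in the traced configuration: 256 elements (shape (4, 4, 4, 4)), alpha = 0.5 (the 0.5 constant), gamma = 2 (the squared |labels − probs|), and the 'mean' reduction (the division by 256.0). A usage sketch under those assumptions, given a CUDA device and both classes above in scope:

import torch

if torch.cuda.is_available():
    logits = torch.rand(4, 4, 4, 4, device='cuda')
    labels = torch.randint(0, 2, (4, 4, 4, 4), device='cuda').float()
    fused = FocalLossNew()(logits, labels)
    eager = FocalLoss()(logits, labels)
    assert torch.allclose(fused, eager, atol=1e-6)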
dumpmemory/Pytorch-NLU
FocalLoss
false
15242
[ "Apache-2.0" ]
115
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
CecaModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ov/covbryzjnff2kb26c5gkcqbvct6kdwzanlx3iu6ee24itsit76o3.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2, 3]), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), 
xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5v/c5vy2mpdsk3ol4tdn7eyu5zzhzoueyp2ctrby2n6qikrbyckgsa7.py # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.copy] # Source node to ATen node mapping: # y_1 => copy # Graph fragment: # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1, %slice_2), kwargs = {}) # %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%empty, %copy, 2, 1, 5), kwargs = {}) # %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %slice_7, 2, 0, 1), kwargs = {}) # %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_12, 2, 5, 6), kwargs = {}) triton_poi_fused_copy_1 = async_compile.triton('triton_poi_fused_copy_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = (-4) + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1)), tmp10 & xmask, other=0.0) tmp12 = 16.0 tmp13 = tmp11 / tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tmp16 = float("nan") tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp6, tmp17, tmp18) tmp20 = tmp3 >= tmp4 tmp21 = tmp3 < tmp1 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp2 tmp24 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1)), tmp23 & xmask, other=0.0) tmp25 = tmp24 / tmp12 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = 
tl.where(tmp23, tmp25, tmp26) tmp28 = tl.where(tmp22, tmp27, tmp16) tmp29 = tl.where(tmp5, tmp19, tmp28) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp2, tmp29, tmp30) tmp32 = tmp0 < tmp4 tmp33 = 4 + x0 tmp34 = tmp33 >= tmp4 tmp35 = tmp33 < tmp1 tmp36 = tmp34 & tmp35 tmp37 = tmp36 & tmp32 tmp38 = tl.load(in_ptr0 + (3 + x0 + (4*x1)), tmp37 & xmask, other=0.0) tmp39 = tmp38 / tmp12 tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp37, tmp39, tmp40) tmp42 = tl.where(tmp36, tmp41, tmp16) tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp32, tmp42, tmp43) tmp45 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1)), tmp9 & xmask, other=0.0) tmp46 = tmp45 / tmp12 tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype) tmp48 = tl.where(tmp9, tmp46, tmp47) tmp49 = tl.where(tmp9, tmp48, tmp16) tmp50 = tl.where(tmp32, tmp44, tmp49) tmp51 = tl.where(tmp2, tmp31, tmp50) tl.store(out_ptr0 + (x2), tmp51, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xv/cxvgsfj3x2o5ls6evsy4rhywutbtjkwezlavric3plphgvn75mea.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(primals_1, buf0, 16, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 1, 6), (6, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.copy] triton_poi_fused_copy_1.run(buf0, buf2, 24, grid=grid(24), stream=stream0) del buf0 # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 4), (4, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_1, buf3, buf4, 256, grid=grid(256), stream=stream0) return (buf4, primals_1, primals_2, buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
from torch import optim as optim


class CecaModule(nn.Module):
    """Constructs a circular ECA module.

    ECA module where the conv uses circular padding rather than zero padding.
    Unlike the spatial dimension, the channels do not have inherent ordering or
    locality, so although this module in essence applies such an assumption,
    there is no need to stop the channels on either "edge" from being
    circularly adapted to each other. This fundamentally increases
    connectivity and may improve performance metrics (accuracy, robustness)
    without significantly impacting resource metrics (parameter size,
    throughput, latency, etc.).

    Args:
        channels: Number of channels of the input feature map, used to derive
            an adaptive kernel size. If not given, ``kernel_size`` is used
            directly (default=None).
        gamma, beta: Parameters of the kernel-size mapping function applied
            when ``channels`` is given; refer to the original paper
            https://arxiv.org/pdf/1910.03151.pdf
        kernel_size: Kernel size of the 1-D conv (default=3).
    """

    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1):
        super(CecaModule, self).__init__()
        assert kernel_size % 2 == 1
        if channels is not None:
            t = int(abs(math.log(channels, 2) + beta) / gamma)
            kernel_size = max(t if t % 2 else t + 1, 3)
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0,
            bias=False)
        self.padding = (kernel_size - 1) // 2

    def forward(self, x):
        # Global average pool over H and W, viewed as a length-C sequence.
        y = x.mean((2, 3)).view(x.shape[0], 1, -1)
        # Circular padding lets the "edge" channels attend to each other.
        y = F.pad(y, (self.padding, self.padding), mode='circular')
        y = self.conv(y)
        y = y.view(x.shape[0], -1, 1, 1).sigmoid()
        return x * y.expand_as(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.utils.data import torch.nn as nn import torch.nn.parallel from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp10 & xmask, other=0.0) tmp12 = 16.0 tmp13 = tmp11 / tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tmp16 = float('nan') tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp6, tmp17, tmp18) tmp20 = tmp3 >= tmp4 tmp21 = tmp3 < tmp1 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp2 tmp24 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1), tmp23 & xmask, other=0.0) tmp25 = tmp24 / tmp12 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp23, tmp25, tmp26) tmp28 = tl.where(tmp22, tmp27, tmp16) tmp29 = tl.where(tmp5, tmp19, tmp28) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp2, tmp29, tmp30) tmp32 = tmp0 < tmp4 tmp33 = 4 + x0 tmp34 = tmp33 >= tmp4 tmp35 = tmp33 < tmp1 tmp36 = tmp34 & tmp35 tmp37 = tmp36 & tmp32 tmp38 = tl.load(in_ptr0 + (3 + x0 + 4 * x1), tmp37 & xmask, other=0.0) tmp39 = tmp38 / tmp12 tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp37, tmp39, tmp40) tmp42 = tl.where(tmp36, tmp41, tmp16) tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp32, tmp42, tmp43) tmp45 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp9 & xmask, other=0.0) tmp46 = tmp45 / tmp12 tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype) tmp48 = tl.where(tmp9, tmp46, tmp47) tmp49 = tl.where(tmp9, tmp48, tmp16) tmp50 = tl.where(tmp32, tmp44, tmp49) tmp51 = tl.where(tmp2, tmp31, tmp50) tl.store(out_ptr0 + x2, tmp51, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tl.store(out_ptr0 + x2, tmp3, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Spatial reduction: sums the 16 H*W elements per (batch, channel).
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        # Circular padding (length 4 -> 6), with the division by 16 that
        # completes the mean folded into the same copy kernel.
        buf2 = empty_strided_cuda((4, 1, 6), (6, 6, 1), torch.float32)
        triton_poi_fused_copy_1[grid(24)](buf0, buf2, 24, XBLOCK=32,
            num_warps=1, num_stages=1)
        del buf0
        # 1-D conv over the padded channel sequence (extern kernel).
        buf3 = extern_kernels.convolution(buf2, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf3, (4, 1, 4), (4, 4, 1))
        # Sigmoid gate broadcast back over the spatial dims.
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_2[grid(256)](primals_1, buf3, buf4, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
    return buf4, primals_1, primals_2, buf2, buf3


class CecaModuleNew(nn.Module):
    """Constructs a circular ECA module.

    ECA module where the conv uses circular padding rather than zero padding.
    Unlike the spatial dimension, the channels do not have inherent ordering or
    locality, so although this module in essence applies such an assumption,
    there is no need to stop the channels on either "edge" from being
    circularly adapted to each other. This fundamentally increases
    connectivity and may improve performance metrics (accuracy, robustness)
    without significantly impacting resource metrics (parameter size,
    throughput, latency, etc.).

    Args:
        channels: Number of channels of the input feature map, used to derive
            an adaptive kernel size. If not given, ``kernel_size`` is used
            directly (default=None).
        gamma, beta: Parameters of the kernel-size mapping function applied
            when ``channels`` is given; refer to the original paper
            https://arxiv.org/pdf/1910.03151.pdf
        kernel_size: Kernel size of the 1-D conv (default=3).
    """

    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1):
        super(CecaModuleNew, self).__init__()
        assert kernel_size % 2 == 1
        if channels is not None:
            t = int(abs(math.log(channels, 2) + beta) / gamma)
            kernel_size = max(t if t % 2 else t + 1, 3)
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0,
            bias=False)
        self.padding = (kernel_size - 1) // 2

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
dumpmemory/NonDeepNetworks
CecaModule
false
15243
[ "BSD-3-Clause" ]
307
5513bf588f4e64c99583440507232675c2e21e34
https://github.com/dumpmemory/NonDeepNetworks/tree/5513bf588f4e64c99583440507232675c2e21e34
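The CecaModule record above pairs the eager module with its Inductor-compiled twin: triton_per_fused_mean_0 reduces over the spatial dims, triton_poi_fused_copy_1 materializes the circular padding through index arithmetic (folding the mean's division by 16 into the copy), and triton_poi_fused_mul_2 applies the sigmoid gate. A minimal parity sketch, assuming the CecaModule and CecaModuleNew definitions above are in scope and a CUDA device is available (check_ceca_parity is a hypothetical helper, not part of the record):

import torch

def check_ceca_parity(atol=1e-5):
    torch.manual_seed(0)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = CecaModule().cuda().eval()
    compiled = CecaModuleNew().cuda().eval()
    # Share the single conv weight so both paths see identical parameters.
    compiled.load_state_dict(eager.state_dict())
    with torch.no_grad():
        ref = eager(x)
        out = compiled(x)
    assert torch.allclose(ref, out, atol=atol), (ref - out).abs().max().item()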
AE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/au/cau6qypw2vz4drppp6yr6chutchyhnniousxhhlq2y5r3yu3gep5.py # Topologically Sorted Source Nodes: [enc_h1], Original ATen: [aten.relu] # Source node to ATen node mapping: # enc_h1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) 
tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py # Topologically Sorted Source Nodes: [dec_h1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # dec_h1 => relu_3 # Graph fragment: # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_9,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, 4), (4, 1)) 
assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [enc_h1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [enc_h2], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [enc_h3], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf5, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf6, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf7 # reuse buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [dec_h1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf8, primals_11, buf16, 256, grid=grid(256), stream=stream0) del primals_11 buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf9 # reuse buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [dec_h2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf10, primals_13, buf15, 256, grid=grid(256), stream=stream0) del primals_13 buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf11) 
buf12 = reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf11 # reuse buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [dec_h3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf12, primals_15, buf14, 256, grid=grid(256), stream=stream0) del primals_15 buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_bar], Original ATen: [aten.addmm] extern_kernels.addmm(primals_17, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_17 return (reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf1, buf3, buf5, reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, buf6, reinterpret_tensor(buf8, (64, 4), (4, 1), 0), reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_16, buf14, primals_14, buf15, primals_12, buf16, primals_10, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn from torch.nn import Linear class AE(nn.Module): def __init__(self, n_enc_1, n_enc_2, n_enc_3, n_dec_1, n_dec_2, n_dec_3, n_input, n_z): super(AE, self).__init__() self.enc_1 = Linear(n_input, n_enc_1) self.enc_2 = Linear(n_enc_1, n_enc_2) self.enc_3 = Linear(n_enc_2, n_enc_3) self.z_layer = Linear(n_enc_3, n_z) self.dec_1 = Linear(n_z, n_dec_1) self.dec_2 = Linear(n_dec_1, n_dec_2) self.dec_3 = Linear(n_dec_2, n_dec_3) self.x_bar_layer = Linear(n_dec_3, n_input) def forward(self, x): enc_h1 = F.relu(self.enc_1(x)) enc_h2 = F.relu(self.enc_2(enc_h1)) enc_h3 = F.relu(self.enc_3(enc_h2)) z = self.z_layer(enc_h3) dec_h1 = F.relu(self.dec_1(z)) dec_h2 = F.relu(self.dec_2(dec_h1)) dec_h3 = F.relu(self.dec_3(dec_h2)) x_bar = self.x_bar_layer(dec_h3) return x_bar, enc_h1, enc_h2, enc_h3, z def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_enc_1': 4, 'n_enc_2': 4, 'n_enc_3': 4, 'n_dec_1': 4, 'n_dec_2': 4, 'n_dec_3': 4, 'n_input': 4, 'n_z': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.nn import Linear assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_relu_0[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), 
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_relu_0[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf7 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256)](buf8, primals_11, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf9 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256)](buf10, primals_13, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_13 buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf11) buf12 = reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf11 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256)](buf12, primals_15, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_17, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_17 return (reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf1, buf3, buf5, reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, buf6, reinterpret_tensor(buf8, (64, 4), (4, 1), 0), reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor( buf12, (64, 4), (4, 1), 0), primals_16, buf14, primals_14, buf15, primals_12, buf16, primals_10, primals_8, primals_6, primals_4) class AENew(nn.Module): def __init__(self, n_enc_1, n_enc_2, n_enc_3, n_dec_1, n_dec_2, n_dec_3, n_input, n_z): super(AENew, self).__init__() self.enc_1 = Linear(n_input, n_enc_1) self.enc_2 = Linear(n_enc_1, n_enc_2) self.enc_3 = Linear(n_enc_2, n_enc_3) self.z_layer = Linear(n_enc_3, n_z) self.dec_1 = Linear(n_z, n_dec_1) self.dec_2 = Linear(n_dec_1, n_dec_2) self.dec_3 = Linear(n_dec_2, n_dec_3) self.x_bar_layer = Linear(n_dec_3, n_input) def forward(self, input_0): primals_1 = self.enc_1.weight primals_2 = self.enc_1.bias primals_4 = self.enc_2.weight primals_5 = self.enc_2.bias primals_6 = self.enc_3.weight primals_7 = self.enc_3.bias primals_8 = self.z_layer.weight primals_9 = self.z_layer.bias primals_10 = self.dec_1.weight primals_11 = self.dec_1.bias primals_12 = self.dec_2.weight primals_13 = self.dec_2.bias primals_14 = self.dec_3.weight primals_15 = self.dec_3.bias primals_16 = self.x_bar_layer.weight primals_17 = self.x_bar_layer.bias primals_3 = input_0 output = call([primals_1, primals_2, 
primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0], output[1], output[2], output[3], output[4]
drzhang3/SDCN
AE
false
15244
[ "Apache-2.0" ]
146
3d11365bcb4af2cbe9625362737f1224aeea3b72
https://github.com/drzhang3/SDCN/tree/3d11365bcb4af2cbe9625362737f1224aeea3b72
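The AE record compiles an eight-layer autoencoder: each Linear becomes an extern mm/addmm on a (64, 4) view of the input, triton_poi_fused_relu_0 adds the bias and applies ReLU in place, and the *_threshold_backward_1 variant additionally saves the ReLU mask (output <= 0) for the backward pass. The compiled forward returns the same five tensors as the eager one (x_bar, enc_h1, enc_h2, enc_h3, z), so a tuple-wise comparison works as a parity check. A minimal sketch, assuming AE and AENew above are in scope and CUDA is available (check_ae_parity is a hypothetical helper):

import torch

def check_ae_parity(atol=1e-5):
    torch.manual_seed(0)
    dims = dict(n_enc_1=4, n_enc_2=4, n_enc_3=4, n_dec_1=4, n_dec_2=4,
                n_dec_3=4, n_input=4, n_z=4)
    eager = AE(**dims).cuda().eval()
    compiled = AENew(**dims).cuda().eval()
    # Copy the weights and biases of all eight Linear layers at once.
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        for ref, out in zip(eager(x), compiled(x)):
            assert torch.allclose(ref, out, atol=atol)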
ConvSqu
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ns/cns7oly7x2mepmwipjj63oa5ojaxyzrb5wa7zk4flpt65nf3pi65.py # Topologically Sorted Source Nodes: [silu], Original ATen: [aten.silu] # Source node to ATen node mapping: # silu => mul, sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {}) triton_poi_fused_silu_0 = async_compile.triton('triton_poi_fused_silu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_silu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_silu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, 
xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [silu], Original ATen: [aten.silu] stream0 = get_raw_stream(0) triton_poi_fused_silu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, primals_1, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn import torch.nn.parallel from torch import optim as optim def autopad(k, p=None): if p is None: p = k // 2 if isinstance(k, int) else [(x // 2) for x in k] return p class ConvSqu(nn.Module): def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): super(ConvSqu, self).__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False ) self.act = nn.SiLU() if act else nn.Identity() def forward(self, x): return self.act(self.conv(x)) def fuseforward(self, x): return self.act(self.conv(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'c1': 4, 'c2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn import torch.nn.parallel from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_silu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_silu_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_1, primals_2, buf0 def autopad(k, p=None): if p is None: p = k // 2 if isinstance(k, int) else [(x // 2) for x in k] return p class ConvSquNew(nn.Module): def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): super(ConvSquNew, self).__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False ) self.act = nn.SiLU() if act else nn.Identity() def fuseforward(self, x): return self.act(self.conv(x)) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
dumpmemory/NonDeepNetworks
ConvSqu
false
15245
[ "BSD-3-Clause" ]
307
5513bf588f4e64c99583440507232675c2e21e34
https://github.com/dumpmemory/NonDeepNetworks/tree/5513bf588f4e64c99583440507232675c2e21e34
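ConvSqu is the simplest record here: the bias-free Conv2d is handed to extern_kernels.convolution unchanged, and the only generated kernel is the SiLU activation, computed elementwise as x * sigmoid(x). Two small checks one can run against the definitions above; the first exercises autopad's "same"-padding rule on CPU, the second confirms the identity the fused kernel implements (a sketch, not part of the record):

import torch
import torch.nn.functional as F

# autopad picks 'same' padding for stride-1 convs: k // 2 per dimension.
assert autopad(1) == 0 and autopad(3) == 1 and autopad(5) == 2
assert autopad((3, 5)) == [1, 2]  # per-dimension kernel sizes
assert autopad(3, p=0) == 0       # an explicit padding wins

# The fused kernel computes silu(x) = x * sigmoid(x) elementwise.
x = torch.randn(64)
assert torch.allclose(F.silu(x), x * torch.sigmoid(x))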
DeepActor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ah/cahw4m4nxjgqpxs7r4u4qbnadoi4zee4mfx4fpd4jjexzq6bpqvh.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_1 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %primals_3], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 36 x1 = (xindex // 36) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((32*x1) + x0), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 36, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-32) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/b7/cb7yiqdigd2vu5it7f2y6axob3bgvkx2ecs3nmymezsrlxsu2jhl.py # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_6 => relu_3 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {}) # %relu_3 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3t/c3tmbcrejifcmnqggk6rpsmyoccwiqogrrxnaamooywcktl5qdne.py # Topologically Sorted Source Nodes: [log_std_1], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] # Source node to ATen node mapping: # log_std_1 => clamp_max, clamp_min # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_13), kwargs = {}) # %clamp_min : [num_users=1] = 
call_function[target=torch.ops.aten.clamp_min.default](args = (%add_tensor, -20), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 2), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_tensor, -20), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%add_tensor, 2), kwargs = {}) # %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {}) triton_poi_fused_clamp_ge_le_logical_and_2 = async_compile.triton('triton_poi_fused_clamp_ge_le_logical_and_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_le_logical_and_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -20.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 2.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 >= tmp3 tmp8 = tmp2 <= tmp5 tmp9 = tmp7 & tmp8 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3m/c3mnc3w2fowp5vtbgub6g2jkwsfwatrw4z7fr6bbwbrq7ztl7slt.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_4 => relu_2 # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (32, 36), (36, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (32, 36), (36, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (32, 36), (36, 1)) assert_size_stride(primals_9, (32, ), (1, )) assert_size_stride(primals_10, (4, 32), (32, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, 32), (32, 1)) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 36), (36, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, primals_2, primals_3, buf1, 144, grid=grid(144), stream=stream0) buf2 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (36, 32), (1, 36), 0), out=buf2) buf3 = empty_strided_cuda((4, 36), (36, 1), torch.float32) 
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf2, primals_5, primals_3, buf3, 144, grid=grid(144), stream=stream0) buf4 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (36, 32), (1, 36), 0), out=buf4) buf5 = empty_strided_cuda((4, 36), (36, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf4, primals_7, primals_3, buf5, 144, grid=grid(144), stream=stream0) buf6 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (36, 32), (1, 36), 0), out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf7, primals_9, 128, grid=grid(128), stream=stream0) del primals_9 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf7, reinterpret_tensor(primals_10, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf8) del primals_11 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf7, reinterpret_tensor(primals_12, (32, 4), (1, 32), 0), out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [log_std_1], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] triton_poi_fused_clamp_ge_le_logical_and_2.run(buf9, primals_13, buf10, buf11, 16, grid=grid(16), stream=stream0) del buf9 del primals_13 buf12 = empty_strided_cuda((4, 32), (32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf4, primals_7, buf12, 128, grid=grid(128), stream=stream0) del buf4 del primals_7 buf13 = empty_strided_cuda((4, 32), (32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf2, primals_5, buf13, 128, grid=grid(128), stream=stream0) del buf2 del primals_5 buf14 = empty_strided_cuda((4, 32), (32, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf0, primals_2, buf14, 128, grid=grid(128), stream=stream0) del buf0 del primals_2 return (buf8, buf10, primals_3, buf1, buf3, buf5, buf7, buf11, primals_12, primals_10, primals_8, buf12, primals_6, buf13, primals_4, buf14, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 36), (36, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 36), (36, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = 
rand_strided((32, 36), (36, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal


def hidden_init(layer):
    fan_in = layer.weight.data.size()[0]
    lim = 1.0 / np.sqrt(fan_in)
    return -lim, lim


class DeepActor(nn.Module):
    """Actor (Policy) Model."""

    def __init__(self, state_size, action_size, seed, device, hidden_size=
        32, init_w=0.003, log_std_min=-20, log_std_max=2):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            hidden_size (int): Number of nodes in each hidden layer
            init_w (float): Bound for uniform init of the output-layer weights
            log_std_min (float): Lower clamp for the log standard deviation
            log_std_max (float): Upper clamp for the log standard deviation
        """
        super(DeepActor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        in_dim = hidden_size + state_size
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(in_dim, hidden_size)
        self.fc3 = nn.Linear(in_dim, hidden_size)
        self.fc4 = nn.Linear(in_dim, hidden_size)
        self.mu = nn.Linear(hidden_size, action_size)
        self.log_std_linear = nn.Linear(hidden_size, action_size)

    def reset_parameters(self, init_w=0.003):
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
        self.fc4.weight.data.uniform_(*hidden_init(self.fc4))
        self.mu.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)

    def forward(self, state: torch.Tensor) -> 'tuple[torch.Tensor, torch.Tensor]':
        x = F.relu(self.fc1(state))
        x = torch.cat([x, state], dim=1)
        x = F.relu(self.fc2(x))
        x = torch.cat([x, state], dim=1)
        x = F.relu(self.fc3(x))
        x = torch.cat([x, state], dim=1)
        x = F.relu(self.fc4(x))
        mu = self.mu(x)
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
        return mu, log_std

    def evaluate(self, state, epsilon=1e-06):
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        log_prob = (dist.log_prob(e) - torch.log(1 - action.pow(2) + epsilon)
            ).sum(1, keepdim=True)
        return action, log_prob

    def get_action(self, state):
        """Returns the action based on a squashed gaussian policy.
        That means the samples are obtained according to:
        a(s,e) = tanh(mu(s) + sigma(s) * e)
        """
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        return action.detach().cpu()

    def get_det_action(self, state):
        mu, _log_std = self.forward(state)
        return torch.tanh(mu).detach().cpu()


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'action_size': 4, 'seed': 4, 'device': 0}]
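A minimal usage sketch for the reference module above (an illustration added for this record, not part of the original source); shapes follow get_inputs() and get_init_inputs():

# Hypothetical usage of DeepActor; runs on CPU with the eager module above.
import torch

actor = DeepActor(state_size=4, action_size=4, seed=4, device=0)
state = torch.rand(4, 4)                  # batch of 4 states, 4 features each
mu, log_std = actor(state)                # both (4, 4); log_std clamped to [-20, 2]
action, log_prob = actor.evaluate(state)  # tanh-squashed sample, log-prob of shape (4, 1)
det_action = actor.get_det_action(state)  # deterministic tanh(mu), detached on CPU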
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn from torch.distributions import Normal assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 36 x1 = xindex // 36 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 36, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-32 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = -20.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 2.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 >= tmp3 tmp8 = tmp2 <= tmp5 tmp9 = tmp7 & tmp8 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (32, 36), (36, 1)) assert_size_stride(primals_5, (32,), 
(1,)) assert_size_stride(primals_6, (32, 36), (36, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 36), (36, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (4, 32), (32, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 32), (32, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 36), (36, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(144)](buf0, primals_2, primals_3, buf1, 144, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (36, 32), (1, 36), 0), out=buf2) buf3 = empty_strided_cuda((4, 36), (36, 1), torch.float32) triton_poi_fused_cat_0[grid(144)](buf2, primals_5, primals_3, buf3, 144, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (36, 32), (1, 36), 0), out=buf4) buf5 = empty_strided_cuda((4, 36), (36, 1), torch.float32) triton_poi_fused_cat_0[grid(144)](buf4, primals_7, primals_3, buf5, 144, XBLOCK=128, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (36, 32), (1, 36), 0), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_relu_1[grid(128)](buf7, primals_9, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, buf7, reinterpret_tensor( primals_10, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf8) del primals_11 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf7, reinterpret_tensor(primals_12, (32, 4), (1, 32), 0), out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_clamp_ge_le_logical_and_2[grid(16)](buf9, primals_13, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf9 del primals_13 buf12 = empty_strided_cuda((4, 32), (32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(128)](buf4, primals_7, buf12, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del primals_7 buf13 = empty_strided_cuda((4, 32), (32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(128)](buf2, primals_5, buf13, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del primals_5 buf14 = empty_strided_cuda((4, 32), (32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(128)](buf0, primals_2, buf14, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 return (buf8, buf10, primals_3, buf1, buf3, buf5, buf7, buf11, primals_12, primals_10, primals_8, buf12, primals_6, buf13, primals_4, buf14) def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class DeepActorNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, device, hidden_size= 32, init_w=0.003, log_std_min=-20, log_std_max=2): """Initialize parameters and build model. 
Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            hidden_size (int): Number of nodes in each hidden layer
            init_w (float): Bound for uniform init of the output-layer weights
            log_std_min (float): Lower clamp for the log standard deviation
            log_std_max (float): Upper clamp for the log standard deviation
        """
        super(DeepActorNew, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.device = device
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        in_dim = hidden_size + state_size
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(in_dim, hidden_size)
        self.fc3 = nn.Linear(in_dim, hidden_size)
        self.fc4 = nn.Linear(in_dim, hidden_size)
        self.mu = nn.Linear(hidden_size, action_size)
        self.log_std_linear = nn.Linear(hidden_size, action_size)

    def reset_parameters(self, init_w=0.003):
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
        self.fc4.weight.data.uniform_(*hidden_init(self.fc4))
        self.mu.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)

    def evaluate(self, state, epsilon=1e-06):
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        log_prob = (dist.log_prob(e) - torch.log(1 - action.pow(2) + epsilon)
            ).sum(1, keepdim=True)
        return action, log_prob

    def get_action(self, state):
        """Returns the action based on a squashed gaussian policy.
        That means the samples are obtained according to:
        a(s,e) = tanh(mu(s) + sigma(s) * e)
        """
        mu, log_std = self.forward(state)
        std = log_std.exp()
        dist = Normal(mu, std)
        e = dist.rsample()
        action = torch.tanh(e)
        return action.detach().cpu()

    def get_det_action(self, state):
        mu, _log_std = self.forward(state)
        return torch.tanh(mu).detach().cpu()

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_8 = self.fc4.weight
        primals_9 = self.fc4.bias
        primals_10 = self.mu.weight
        primals_11 = self.mu.bias
        primals_12 = self.log_std_linear.weight
        primals_13 = self.log_std_linear.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0], output[1]
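A hedged equivalence check (my assumption, not from the source repo): with shared weights, the Inductor-compiled DeepActorNew should reproduce the eager DeepActor, since call() runs the same linear/ReLU/cat/clamp graph as fused CUDA kernels.

# Illustrative only; requires a CUDA device because call() allocates CUDA buffers.
import torch

eager = DeepActor(4, 4, seed=4, device=0).cuda()
fused = DeepActorNew(4, 4, seed=4, device=0).cuda()
fused.load_state_dict(eager.state_dict())  # make the two parameter sets identical
state = torch.rand(4, 4, device='cuda')
mu_e, log_std_e = eager(state)
mu_f, log_std_f = fused(state)
assert torch.allclose(mu_e, mu_f, atol=1e-5)
assert torch.allclose(log_std_e, log_std_f, atol=1e-5)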
drib861204/Soft-Actor-Critic-and-Extensions
DeepActor
false
15,246
[ "MIT" ]
143
3075df7430c1c49177b3798d753a9e3f6226672e
https://github.com/drib861204/Soft-Actor-Critic-and-Extensions/tree/3075df7430c1c49177b3798d753a9e3f6226672e
AdaILN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/35/c35dtpn5wnt7hcpwlklhvzbshubggu6yulxkjd2gllcaby3qm22c.py # Topologically Sorted Source Nodes: [l_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # l_norm => add_1, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-08), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) triton_per_fused_native_layer_norm_0 = async_compile.triton('triton_per_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_layer_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-08 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7r/c7rmb6gu3prshrf2ym5cazamnysbmqxit43upyew57hiubdf6cgv.py # Topologically Sorted Source Nodes: [i_norm, l_norm, mul, sub, mul_1, out, mul_2, out_1], Original ATen: [aten._native_batch_norm_legit, aten.native_layer_norm, aten.mul, aten.rsub, aten.add] # Source node to ATen node mapping: # i_norm => add, rsqrt, var_mean # l_norm => mul_1, sub_1 # mul => mul_2 # mul_1 => mul_3 # mul_2 => mul_4 # out => add_2 # out_1 => add_3 # sub => sub_2 # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %expand), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %expand), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %sub_2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %view_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %view_3), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_mul_native_layer_norm_rsub_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_mul_native_layer_norm_rsub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_mul_native_layer_norm_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_mul_native_layer_norm_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-08 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp0 - tmp26 tmp29 = tmp27 * tmp28 tmp30 = 1.0 tmp31 = tmp30 - tmp24 tmp32 = tmp29 * tmp31 tmp33 = tmp25 + tmp32 tmp35 = tmp33 * tmp34 tmp37 = tmp35 + tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(out_ptr1 + (r1 + (16*x0)), tmp37, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf7 = 
reinterpret_tensor(buf5, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [l_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_per_fused_native_layer_norm_0.run(buf7, primals_1, buf4, 4, 64, grid=grid(4), stream=stream0) buf0 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf3 = reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf1 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [i_norm, l_norm, mul, sub, mul_1, out, mul_2, out_1], Original ATen: [aten._native_batch_norm_legit, aten.native_layer_norm, aten.mul, aten.rsub, aten.add] triton_per_fused__native_batch_norm_legit_add_mul_native_layer_norm_rsub_1.run(buf3, primals_1, primals_2, buf4, buf7, primals_3, primals_4, buf0, buf8, 16, 16, grid=grid(16), stream=stream0) del primals_2 del primals_4 return (buf8, primals_1, primals_3, buf0, buf3, buf4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.cpp_extension


class AdaILN(nn.Module):

    def __init__(self, channels, resl, eps=1e-08):
        super().__init__()
        self.rho = nn.Parameter(torch.Tensor(1, channels, 1, 1))
        self.rho.data.fill_(1.0)
        self.instance_norm = nn.InstanceNorm2d(channels, eps=eps, affine=False)
        self.layer_norm = nn.LayerNorm((channels, resl, resl), eps=eps,
            elementwise_affine=False)

    def forward(self, x, gamma, beta):
        i_norm = self.instance_norm(x)
        l_norm = self.layer_norm(x)
        out = i_norm * self.rho.expand(x.size(0), -1, -1, -1) + l_norm * (1 -
            self.rho.expand(x.size(0), -1, -1, -1))
        out = out * gamma.view(out.size(0), -1, 1, 1) + beta.view(out.size(
            0), -1, 1, 1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'channels': 4, 'resl': 4}]
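A short usage sketch (illustrative; shapes mirror get_inputs() and get_init_inputs() above). rho blends the instance- and layer-normalized activations, while gamma and beta apply an adaptive per-sample, per-channel affine:

import torch

norm = AdaILN(channels=4, resl=4)
x = torch.rand(4, 4, 4, 4)      # (batch, channels, resl, resl)
gamma = torch.rand(4, 4)        # per-sample, per-channel scale
beta = torch.rand(4, 4)         # per-sample, per-channel shift
out = norm(x, gamma, beta)      # rho starts at 1, so this begins as pure instance norm
assert out.shape == x.shape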
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.cpp_extension assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-08 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_mul_native_layer_norm_rsub_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-08 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp0 - tmp26 tmp29 = tmp27 * tmp28 tmp30 = 1.0 tmp31 = tmp30 - tmp24 tmp32 = tmp29 * tmp31 tmp33 = tmp25 + tmp32 tmp35 = tmp33 * tmp34 tmp37 = tmp35 + tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 16 * x0), tmp37, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 
1, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf7 = reinterpret_tensor(buf5, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf5 get_raw_stream(0) triton_per_fused_native_layer_norm_0[grid(4)](buf7, primals_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf0 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf3 = reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused__native_batch_norm_legit_add_mul_native_layer_norm_rsub_1[ grid(16)](buf3, primals_1, primals_2, buf4, buf7, primals_3, primals_4, buf0, buf8, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_2 del primals_4 return buf8, primals_1, primals_3, buf0, buf3, buf4, buf7 class AdaILNNew(nn.Module): def __init__(self, channels, resl, eps=1e-08): super().__init__() self.rho = nn.Parameter(torch.Tensor(1, channels, 1, 1)) self.rho.data.fill_(1.0) self.instance_norm = nn.InstanceNorm2d(channels, eps=eps, affine=False) self.layer_norm = nn.LayerNorm((channels, resl, resl), eps=eps, elementwise_affine=False) def forward(self, input_0, input_1, input_2): primals_2 = self.rho primals_1 = input_0 primals_3 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
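A hedged equivalence sketch (an assumption, requires CUDA): AdaILNNew reuses the same rho parameter, so after copying state the fused kernels should match the eager AdaILN up to floating-point tolerance.

import torch

eager = AdaILN(channels=4, resl=4).cuda()
fused = AdaILNNew(channels=4, resl=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
gamma = torch.rand(4, 4, device='cuda')
beta = torch.rand(4, 4, device='cuda')
assert torch.allclose(eager(x, gamma, beta), fused(x, gamma, beta), atol=1e-4)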
STomoya/animeface
AdaILN
false
15,247
[ "MIT" ]
61
37b3cd26097d7874559d4c152e41e5712b7a1a42
https://github.com/STomoya/animeface/tree/37b3cd26097d7874559d4c152e41e5712b7a1a42
DiceLossV1
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/o2/co2neuarhxb7dypnrg4zbftxpiycuy4tsjhb4xfp7la4xmikdlvw.py # Topologically Sorted Source Nodes: [prob, prob_1, sub, mul, add, sub_1, mul_1, add_1, add_2, truediv, dsc_i, loss], Original ATen: [aten.sigmoid, aten.gather, aten.rsub, aten.mul, aten.add, aten.div, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # dsc_i => sub_2 # loss => mean # mul => mul # mul_1 => mul_1 # prob => sigmoid # prob_1 => gather # sub => sub # sub_1 => sub_1 # truediv => div # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %gather : [num_users=4] = call_function[target=torch.ops.aten.gather.default](args = (%sigmoid, 1, %view), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %gather), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %gather), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-09), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %gather), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %gather), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-09), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) triton_per_fused_add_div_gather_mean_mul_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_add_div_gather_mean_mul_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math 
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_gather_mean_mul_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_gather_mean_mul_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last') tmp7 = tmp6.to(tl.float32) tmp8 = tl.sigmoid(tmp7) tmp9 = 1.0 tmp10 = tmp9 - tmp8 tmp11 = tmp10 * tmp8 tmp12 = 1e-09 tmp13 = tmp11 + tmp12 tmp14 = tmp11 + tmp9 tmp15 = tmp14 + tmp12 tmp16 = tmp13 / tmp15 tmp17 = tmp9 - tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = 4.0 tmp22 = tmp20 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp22, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [prob, prob_1, sub, mul, add, sub_1, mul_1, add_1, add_2, truediv, dsc_i, loss], Original ATen: [aten.sigmoid, aten.gather, aten.rsub, aten.mul, aten.add, aten.div, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_div_gather_mean_mul_rsub_sigmoid_0.run(buf1, arg1_1, arg0_1, 1, 4, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64) arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == 
"__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class DiceLossV1(nn.Module):

    def __init__(self, reduction='mean', epsilon=1e-09):
        """[ERROR, does not converge - cause unknown] Dice-Loss, for
        imbalanced data, but convergence is difficult.
        paper: Dice Loss for Data-imbalanced NLP Tasks
        url: https://arxiv.org/pdf/1911.02855.pdf
        args:
            reduction: str, Specifies the reduction to apply to the output.
                eg. ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``
            epsilon: float, a tiny constant for numerical stability. eg. 1e-9
        returns:
            Tensor of loss.
        examples:
            >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
            >>> label, logits = torch.tensor(label).float(), torch.tensor(logits).float()
            >>> loss = DiceLoss()(logits, label)
        """
        super(DiceLossV1, self).__init__()
        self.reduction = reduction
        self.epsilon = epsilon

    def forward(self, logits, labels):
        prob = torch.sigmoid(logits)
        index = labels.unsqueeze(1).view(prob.size(0), -1)
        prob = torch.gather(prob, dim=1, index=index)
        dsc_i = 1 - ((1 - prob) * prob + self.epsilon) / ((1 - prob) *
            prob + 1 + self.epsilon)
        if 'mean' == self.reduction:
            loss = dsc_i.mean()
        else:
            loss = dsc_i.sum()
        return loss


def get_inputs():
    return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4], dtype=
        torch.int64)]


def get_init_inputs():
    return [[], {}]
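A usage sketch under this record's input convention (labels are int64 class indices, gathered along dim=1 of the sigmoid probabilities); an illustration, not part of the original file:

import torch

loss_fn = DiceLossV1(reduction='mean')
logits = torch.tensor([[0.0, 1.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0]])
labels = torch.tensor([1, 0])      # one target class index per row
loss = loss_fn(logits, labels)     # scalar: mean over rows of 1 - dsc_i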
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_gather_mean_mul_rsub_sigmoid_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert((0 <= tmp4) & (tmp4 < 4),
        'index out of bounds: 0 <= tmp4 < 4')
    tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
        'evict_last')
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tl.sigmoid(tmp7)
    tmp9 = 1.0
    tmp10 = tmp9 - tmp8
    tmp11 = tmp10 * tmp8
    tmp12 = 1e-09
    tmp13 = tmp11 + tmp12
    tmp14 = tmp11 + tmp9
    tmp15 = tmp14 + tmp12
    tmp16 = tmp13 / tmp15
    tmp17 = tmp9 - tmp16
    tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
    tmp20 = tl.sum(tmp18, 1)[:, None]
    tmp21 = 4.0
    tmp22 = tmp20 / tmp21
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_gather_mean_mul_rsub_sigmoid_0[grid(1)](buf1,
            arg1_1, arg0_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class DiceLossV1New(nn.Module):

    def __init__(self, reduction='mean', epsilon=1e-09):
        """[ERROR, does not converge - cause unknown] Dice-Loss, for
        imbalanced data, but convergence is difficult.
        paper: Dice Loss for Data-imbalanced NLP Tasks
        url: https://arxiv.org/pdf/1911.02855.pdf
        args:
            reduction: str, Specifies the reduction to apply to the output.
                eg. ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``
            epsilon: float, a tiny constant for numerical stability. eg. 1e-9
        returns:
            Tensor of loss.
        examples:
            >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
            >>> label, logits = torch.tensor(label).float(), torch.tensor(logits).float()
            >>> loss = DiceLoss()(logits, label)
        """
        super(DiceLossV1New, self).__init__()
        self.reduction = reduction
        self.epsilon = epsilon

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
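A hedged equivalence sketch (an assumption, requires CUDA): the fused kernel casts the gathered int64 logits to float32 before the sigmoid, so it should match the eager loss computed on float logits.

import torch

eager = DiceLossV1()
fused = DiceLossV1New()
logits = torch.ones(4, 4, dtype=torch.int64, device='cuda')  # as in get_inputs()
labels = torch.ones(4, dtype=torch.int64, device='cuda')
assert torch.allclose(eager(logits.float(), labels), fused(logits, labels))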
dumpmemory/Pytorch-NLU
DiceLossV1
false
15,248
[ "Apache-2.0" ]
115
864fb9acc7751fc51abd3d05d24b5a9a7eab7110
https://github.com/dumpmemory/Pytorch-NLU/tree/864fb9acc7751fc51abd3d05d24b5a9a7eab7110
HighwayLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/i3/ci3asduovereenc5juon6pzphu6oeecdpkloo6migbh6cxdu6y2t.py # Topologically Sorted Source Nodes: [gate, nlin, mul, sub, mul_1, res], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.rsub, aten.add] # Source node to ATen node mapping: # gate => sigmoid # mul => mul # mul_1 => mul_1 # nlin => tanh # res => add # sub => sub # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp7 = tl.load(in_ptr2 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = libdevice.tanh(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [gate, nlin, mul, sub, mul_1, res], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.rsub, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0.run(buf0, buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0) return (buf2, primals_3, buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.utils.checkpoint
import torch.utils.tensorboard


def my_xavier_init(m, gain=1):
    """Xavier initialization: a weight-initialization scheme that tries to
    make the variance of a layer's outputs equal to the variance of its
    inputs.
    """
    for p in m.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p, gain)
        else:
            nn.init.constant_(p, 0)


class HighwayLayer(torch.nn.Module):
    """Highway transformation used in span prediction."""

    def __init__(self, dim):
        super(HighwayLayer, self).__init__()
        self.gate_proj = nn.Linear(dim, dim, bias=True)
        self.nlin_proj = nn.Linear(dim, dim, bias=True)
        my_xavier_init(self.nlin_proj)
        my_xavier_init(self.gate_proj)
        # Bias the gate toward the carry path at initialization.
        nn.init.constant_(self.gate_proj.bias, -1)

    def forward(self, x):
        gate = torch.sigmoid(self.gate_proj(x))
        nlin = torch.tanh(self.nlin_proj(x))
        # Convex combination of the transformed input and the input itself.
        res = gate * nlin + (1 - gate) * x
        return res


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
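The eager module above is small enough to exercise directly. A usage sketch (variable names here are illustrative, not from the source repo): because the gate bias is initialised to -1, the sigmoid gate starts well below 0.5, so a freshly constructed layer is biased toward carrying its input through.

layer = HighwayLayer(dim=4)
x = get_inputs()[0]
y = layer(x)
print(y.shape)               # torch.Size([4, 4, 4, 4])
# At init, gate is roughly sigmoid(-1) ~ 0.27, so the carry term
# (1 - gate) * x dominates the output.
print((y - x).abs().mean())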
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.utils.checkpoint
import torch.utils.tensorboard
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused elementwise highway combination over the 256 flattened elements:
    #   out = sigmoid(g) * tanh(n) + (1 - sigmoid(g)) * x
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)  # gate pre-activation g
    tmp2 = tl.load(in_ptr1 + x0, xmask)  # nonlinearity pre-activation n
    tmp7 = tl.load(in_ptr2 + x0, xmask)  # original input x
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = libdevice.tanh(tmp2)
    tmp4 = tmp1 * tmp3
    tmp5 = 1.0
    tmp6 = tmp5 - tmp1
    tmp8 = tmp6 * tmp7
    tmp9 = tmp4 + tmp8
    tl.store(out_ptr0 + x0, tmp9, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # gate_proj as a single addmm over the input flattened to (64, 4).
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # nlin_proj over the same flattened input.
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf0, buf1,
            primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf2, primals_3, buf0, buf1


def my_xavier_init(m, gain=1):
    """Xavier initialization: a weight-initialization scheme that tries to
    make the variance of a layer's outputs equal to the variance of its
    inputs.
    """
    for p in m.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p, gain)
        else:
            nn.init.constant_(p, 0)


class HighwayLayerNew(torch.nn.Module):
    """Highway transformation used in span prediction."""

    def __init__(self, dim):
        super(HighwayLayerNew, self).__init__()
        self.gate_proj = nn.Linear(dim, dim, bias=True)
        self.nlin_proj = nn.Linear(dim, dim, bias=True)
        my_xavier_init(self.nlin_proj)
        my_xavier_init(self.gate_proj)
        nn.init.constant_(self.gate_proj.bias, -1)

    def forward(self, input_0):
        primals_1 = self.gate_proj.weight
        primals_2 = self.gate_proj.bias
        primals_4 = self.nlin_proj.weight
        primals_5 = self.nlin_proj.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
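Since `HighwayLayerNew` keeps the same parameter names as the reference `HighwayLayer`, the two can be compared directly after copying state. A minimal sketch, assuming a CUDA device and both classes from the code above in scope:

torch.manual_seed(0)
eager = HighwayLayer(4).cuda()     # reference eager module
fused = HighwayLayerNew(4).cuda()  # Triton-backed wrapper
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), eager(x), rtol=1e-5, atol=1e-5)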
repo_name: ali-senguel/fairo
module_name: HighwayLayer
synthetic: false
uuid: 15249
licenses: ["MIT"]
stars: 669
sha: 1ec5d8ecbdfc782de63a92aad9bf8534110ce762
repo_link: https://github.com/ali-senguel/fairo/tree/1ec5d8ecbdfc782de63a92aad9bf8534110ce762