Dataset columns (type and observed value/length range per column):

column | type | range
---|---|---
entry_point | string | length 1–65
original_triton_code | string | length 4.5k–619k
python_code | string | length 208–60.9k
triton_code | string | length 1.15k–275k
repo_name | string | length 7–115
module_name | string | length 1–65
synthetic | bool | 1 class
uuid | int64 | 0–18.5k
licenses | list | length 1–6
stars | int64 | 0–19.8k
sha | string | length 40
repo_link | string | length 72–180
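For orientation, a minimal sketch of loading rows with this schema via the `datasets` library. The dataset identifier `org/dataset` is a hypothetical placeholder, since this dump does not name the dataset:

```python
from datasets import load_dataset

# 'org/dataset' is a hypothetical placeholder; substitute the real dataset id.
ds = load_dataset('org/dataset', split='train')
row = ds[0]
print(row['entry_point'], row['repo_name'], row['uuid'])
print(row['python_code'][:200])   # eager PyTorch reference for the module
print(row['triton_code'][:200])   # standalone Inductor/Triton version
```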
entry_point: FromRGB
original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/qt/cqtywk2a4wy7frtx4av5g7bvv4kxfvxaxuv64y65j6ebodrpqxvc.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 65536, grid=grid(65536), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch
import torch.nn as nn
class FromRGB(nn.Module):
"""Some Information about FromRGB"""
def __init__(self, channels):
super(FromRGB, self).__init__()
        self.conv = nn.Conv2d(3, channels, kernel_size=1, stride=1, padding=0, bias=True)
def forward(self, x):
return self.conv(x)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'channels': 4}]
triton_code:
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(65536)](buf1, primals_2, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class FromRGBNew(nn.Module):
"""Some Information about FromRGB"""
def __init__(self, channels):
super(FromRGBNew, self).__init__()
        self.conv = nn.Conv2d(3, channels, kernel_size=1, stride=1, padding=0, bias=True)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
repo_name: uthree/gan-image-generator
module_name: FromRGB
synthetic: false
uuid: 11001
licenses: ["MIT"]
stars: 0
sha: 85585e389b5a494393da0789d82824f8c811e263
repo_link: https://github.com/uthree/gan-image-generator/tree/85585e389b5a494393da0789d82824f8c811e263
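As a quick orientation to how the three code fields relate, a hedged sanity-check sketch (not part of the dataset record): it assumes the `FromRGB` class from `python_code` and the `FromRGBNew` wrapper from `triton_code` above are both in scope and that a CUDA device is available.

```python
import torch

def check_fromrgb(atol=1e-5):
    torch.manual_seed(0)
    eager = FromRGB(channels=4).cuda()        # from python_code above
    compiled = FromRGBNew(channels=4).cuda()  # from triton_code above
    # Copy weights so both modules compute the same 1x1 convolution.
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 3, 64, 64, device='cuda')  # shape from get_inputs()
    with torch.no_grad():
        ref, out = eager(x), compiled(x)
    # extern_kernels.convolution plus the fused bias-add should match eager.
    assert torch.allclose(ref, out, atol=atol)
    return out
```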
entry_point: GatedResidualNetwork
original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ck/cck6zsxedo53nyj2po2pvkfjvrr75ansuu3rjjhu6zyrx6xzssqo.py
# Topologically Sorted Source Nodes: [n2], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# n2 => expm1, gt, mul, mul_2, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/en/cenabni3h77zazokaukc6skf7kuv4ta2awact6sfvxbq7che2ucu.py
# Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# grn => var_mean
# sigmoid => sigmoid
# x_1 => mul_3
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_7), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_3), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_mul_native_layer_norm_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp8 = tl.sigmoid(tmp7)
tmp10 = tmp8 * tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp15 = tl.sigmoid(tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp22 = tl.sigmoid(tmp21)
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uj/cuj7uutcqmly235uixsc4lxbmmowhsqrjmbzktxbwhqmb7hgix5q.py
# Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# grn => add_1, add_2, mul_4, mul_5, rsqrt, sub
# sigmoid => sigmoid
# x_1 => mul_3
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_7), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_10), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_11), kwargs = {})
triton_poi_fused_add_mul_native_layer_norm_sigmoid_2 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [n2], Original ATen: [aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_elu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [n1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_9
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
triton_poi_fused_add_mul_native_layer_norm_sigmoid_1.run(primals_3, buf3, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
triton_poi_fused_add_mul_native_layer_norm_sigmoid_2.run(primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
del primals_11
return (buf7, primals_3, primals_10, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, buf3, buf4, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class GatedLinearUnit(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, x):
x = self.dropout(x)
x = self.act(self.w4(x)) * self.w5(x)
return x
class GatedResidualNetwork(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, context_size=None, dropout=0):
super().__init__()
self.w1 = nn.Linear(hidden_size, hidden_size)
self.w2 = nn.Linear(input_size, hidden_size)
self.w3 = None if context_size is None else nn.Linear(context_size,
hidden_size, bias=False)
self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
self.layer_norm = nn.LayerNorm(output_size)
        self.residual = nn.Sequential() if input_size == output_size else nn.Linear(input_size, output_size)
def forward(self, a, c=None):
if c is not None:
n2 = F.elu(self.w2(a) + self.w3(c))
else:
n2 = F.elu(self.w2(a))
n1 = self.w1(n2)
grn = self.layer_norm(self.residual(a) + self.glu(n1))
return grn
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
triton_code:
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp8 = tl.sigmoid(tmp7)
tmp10 = tmp8 * tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp15 = tl.sigmoid(tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp22 = tl.sigmoid(tmp21)
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_9
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(64)](
primals_3, buf3, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_2[grid(256)](
primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf5
del buf6
del primals_11
return buf7, primals_3, primals_10, buf0, reinterpret_tensor(buf1, (64,
4), (4, 1), 0), buf2, buf3, buf4, primals_8, primals_6, primals_4
class GatedLinearUnit(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, x):
x = self.dropout(x)
x = self.act(self.w4(x)) * self.w5(x)
return x
class GatedResidualNetworkNew(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, context_size=None, dropout=0):
super().__init__()
self.w1 = nn.Linear(hidden_size, hidden_size)
self.w2 = nn.Linear(input_size, hidden_size)
self.w3 = None if context_size is None else nn.Linear(context_size,
hidden_size, bias=False)
self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
self.layer_norm = nn.LayerNorm(output_size)
        self.residual = nn.Sequential() if input_size == output_size else nn.Linear(input_size, output_size)
def forward(self, input_0):
primals_1 = self.w1.weight
primals_2 = self.w1.bias
primals_4 = self.w2.weight
primals_5 = self.w2.bias
primals_6 = self.glu.w4.weight
primals_7 = self.glu.w4.bias
primals_8 = self.glu.w5.weight
primals_9 = self.glu.w5.bias
primals_10 = self.layer_norm.weight
primals_11 = self.layer_norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
repo_name: krodyush/training_extensions
module_name: GatedResidualNetwork
synthetic: false
uuid: 11002
licenses: ["Apache-2.0"]
stars: 0
sha: 542f4004dfbc6fc62a622065367ba4f85a703dd3
repo_link: https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3
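A comparison harness in the same spirit, hedged: it assumes `GatedResidualNetwork` and `GatedResidualNetworkNew` from this record are in scope and CUDA is available. Because the compiled path fuses sigmoid/mul/add with the layer norm, exact bitwise equality is not expected; `allclose` with a small tolerance is the appropriate check.

```python
import torch

torch.manual_seed(0)
eager = GatedResidualNetwork(input_size=4, hidden_size=4, output_size=4).cuda()
fused = GatedResidualNetworkNew(input_size=4, hidden_size=4, output_size=4).cuda()
fused.load_state_dict(eager.state_dict())  # same parameter names and shapes
a = torch.rand(4, 4, 4, 4, device='cuda')  # shape taken from get_inputs()
with torch.no_grad():
    # Checks whether the wrapper's primals mapping reproduces the eager module.
    print(torch.allclose(eager(a), fused(a), atol=1e-5))
```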
entry_point: LearnableBias
original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/uz/cuzdojeecdnp6sj6qer5und3uv6oxkzgk2365i55gch5mqyktk3i.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %expand), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch
import torch.nn as nn
class LearnableBias(nn.Module):
def __init__(self, out_chn):
super(LearnableBias, self).__init__()
self.bias = nn.Parameter(torch.zeros(out_chn), requires_grad=True)
def forward(self, x):
out = x + self.bias.expand_as(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'out_chn': 4}]
triton_code:
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class LearnableBiasNew(nn.Module):
def __init__(self, out_chn):
super(LearnableBiasNew, self).__init__()
self.bias = nn.Parameter(torch.zeros(out_chn), requires_grad=True)
def forward(self, input_0):
primals_1 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
repo_name: uzair789/pytorch-retinanet
module_name: LearnableBias
synthetic: false
uuid: 11003
licenses: ["Apache-2.0"]
stars: 0
sha: cabac159a9877825ef04ab06d3b9a63bdfa4f306
repo_link: https://github.com/uzair789/pytorch-retinanet/tree/cabac159a9877825ef04ab06d3b9a63bdfa4f306
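A small sketch of what the fused kernel computes, assuming `LearnableBias` and `LearnableBiasNew` from this record are importable: indexing the bias with `xindex % 4` broadcasts it along the trailing dimension, which for a length-4 bias and a (4, 4, 4, 4) contiguous input is exactly `x + self.bias.expand_as(x)`.

```python
import torch

torch.manual_seed(0)
m = LearnableBias(out_chn=4).cuda()
m_new = LearnableBiasNew(out_chn=4).cuda()
with torch.no_grad():
    m.bias.normal_()          # give the zero-initialized bias real values
    m_new.bias.copy_(m.bias)
x = torch.rand(4, 4, 4, 4, device='cuda')
# The fused kernel's `x0 = xindex % 4` walks the last dimension, matching
# the eager broadcast of bias.expand_as(x).
print(torch.allclose(m(x), m_new(x)))
```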
entry_point: DirichletPolicySingleLayer
original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/vc/cvcseyeo77rnrxxikpcg46r3vdrbiifvnsv54imtvkqk6cn6nkzz.py
# Topologically Sorted Source Nodes: [sigmoid, action, clamp], Original ATen: [aten.sigmoid, aten.mul, aten.clamp]
# Source node to ATen node mapping:
# action => mul
# clamp => clamp_max, clamp_min
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, inf), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, -inf), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, inf), kwargs = {})
triton_poi_fused_clamp_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_clamp_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = float("inf")
tmp3 = tmp1 * tmp2
tmp4 = float("-inf")
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = triton_helpers.minimum(tmp5, tmp2)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 256), (256, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 16384, grid=grid(16384), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, action, clamp], Original ATen: [aten.sigmoid, aten.mul, aten.clamp]
triton_poi_fused_clamp_mul_sigmoid_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), buf2, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:
import torch
import numpy as np
import torch.nn.functional as F
import torch.distributions as td
import torch.nn as nn
class PolicyNetwork(nn.Module):
"""Base class for stochastic policy networks."""
def __init__(self):
super().__init__()
def forward(self, state):
"""Take state as input, then output the parameters of the policy."""
raise NotImplementedError('forward not implemented.')
def sample(self, state):
"""
Sample an action based on the model parameters given the current state.
"""
raise NotImplementedError('sample not implemented.')
class DirichletPolicyBase(PolicyNetwork):
"""
Base class for Dirichlet policies.
Desired network needs to be implemented.
"""
def __init__(self, min_alpha=-np.inf, max_alpha=np.inf):
super().__init__()
self.min_alpha = min_alpha
self.max_alpha = max_alpha
def sample(self, state, no_log_prob=False):
alpha = self.forward(state)
dist = td.Dirichlet(alpha)
action = dist.sample()
return action if no_log_prob else (action, dist.log_prob(action))
class DirichletPolicySingleLayer(DirichletPolicyBase):
"""Working, single-layer Dirichlet policy network."""
def __init__(self, state_dim, action_dim, hidden_layer_size=256,
min_alpha=-np.inf, max_alpha=np.inf):
super().__init__(min_alpha, max_alpha)
self.linear1 = nn.Linear(state_dim, hidden_layer_size)
self.linear2 = nn.Linear(hidden_layer_size, action_dim)
nn.init.normal_(self.linear1.weight, std=0.001)
nn.init.normal_(self.linear1.bias, std=0.001)
nn.init.normal_(self.linear2.weight, std=0.001)
nn.init.normal_(self.linear2.bias, std=0.001)
def forward(self, state):
x = F.relu(self.linear1(state))
action = self.max_alpha * F.sigmoid(self.linear2(x))
return torch.clamp(action, self.min_alpha, self.max_alpha)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
triton_code:
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.distributions as td
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_clamp_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = float('inf')
tmp3 = tmp1 * tmp2
tmp4 = float('-inf')
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = triton_helpers.minimum(tmp5, tmp2)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 256), (256, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
        del buf0
buf4 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf4, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_mul_sigmoid_1[grid(256)](buf2, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
    return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), buf2, primals_4, buf4
class PolicyNetwork(nn.Module):
"""Base class for stochastic policy networks."""
def __init__(self):
super().__init__()
def forward(self, state):
"""Take state as input, then output the parameters of the policy."""
raise NotImplementedError('forward not implemented.')
def sample(self, state):
"""
Sample an action based on the model parameters given the current state.
"""
raise NotImplementedError('sample not implemented.')
class DirichletPolicyBase(PolicyNetwork):
"""
Base class for Dirichlet policies.
Desired network needs to be implemented.
"""
def __init__(self, min_alpha=-np.inf, max_alpha=np.inf):
super().__init__()
self.min_alpha = min_alpha
self.max_alpha = max_alpha
def sample(self, state, no_log_prob=False):
alpha = self.forward(state)
dist = td.Dirichlet(alpha)
action = dist.sample()
return action if no_log_prob else (action, dist.log_prob(action))
class DirichletPolicySingleLayerNew(DirichletPolicyBase):
"""Working, single-layer Dirichlet policy network."""
def __init__(self, state_dim, action_dim, hidden_layer_size=256,
min_alpha=-np.inf, max_alpha=np.inf):
super().__init__(min_alpha, max_alpha)
self.linear1 = nn.Linear(state_dim, hidden_layer_size)
self.linear2 = nn.Linear(hidden_layer_size, action_dim)
nn.init.normal_(self.linear1.weight, std=0.001)
nn.init.normal_(self.linear1.bias, std=0.001)
nn.init.normal_(self.linear2.weight, std=0.001)
nn.init.normal_(self.linear2.bias, std=0.001)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
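# Hedged reconstruction (ours, not part of this record): the eager forward that
# this graph was traced from is not shown above, but the fused kernels
# (linear -> ReLU -> linear, then sigmoid scaled by max_alpha and clamped)
# suggest it computed roughly the following. Note that with the default
# max_alpha = np.inf the concentrations are infinite, so practical use
# presumably passes finite alpha bounds.
def _reconstructed_forward(policy, state):
    x = torch.relu(policy.linear1(state))
    alpha = policy.max_alpha * torch.sigmoid(policy.linear2(x))
    return torch.clamp(alpha, policy.min_alpha, policy.max_alpha)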
|
wessle/costaware
|
DirichletPolicySingleLayer
| false | 11,004 |
[
"MIT"
] | 0 |
151502308411528eaa703d353d138fc809e59d8e
|
https://github.com/wessle/costaware/tree/151502308411528eaa703d353d138fc809e59d8e
|
PinballLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/n3/cn3ibaenux4ljrzrug2suadaqyb4uuj4p27xmsnxrco7of4fcu76.py
# Topologically Sorted Source Nodes: [delta_y, mul, mul_1, pinball, pinball_1], Original ATen: [aten.sub, aten.mul, aten.maximum, aten.mean]
# Source node to ATen node mapping:
# delta_y => sub
# mul => mul
# mul_1 => mul_1
# pinball => maximum
# pinball_1 => mean
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (0.5, %sub), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (-0.5, %sub), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%maximum,), kwargs = {})
triton_per_fused_maximum_mean_mul_sub_0 = async_compile.triton('triton_per_fused_maximum_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_maximum_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_maximum_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = 0.5
tmp4 = tmp3 * tmp2
tmp5 = -0.5
tmp6 = tmp5 * tmp2
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [delta_y, mul, mul_1, pinball, pinball_1], Original ATen: [aten.sub, aten.mul, aten.maximum, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_maximum_mean_mul_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class PinballLoss(nn.Module):
""" Pinball Loss
Computes the pinball loss between y and y_hat.
Parameters
----------
y: tensor
actual values in torch tensor.
y_hat: tensor (same shape as y)
predicted values in torch tensor.
tau: float, between 0 and 1
        the slope of the pinball loss; in the context of
quantile regression, the value of tau determines the
conditional quantile level.
Returns
----------
pinball_loss:
        average pinball loss for the predicted quantile
"""
def __init__(self, tau=0.5):
super(PinballLoss, self).__init__()
self.tau = tau
def forward(self, y, y_hat):
delta_y = torch.sub(y, y_hat)
        pinball = torch.max(torch.mul(self.tau, delta_y), torch.mul(self.tau - 1, delta_y))
pinball = pinball.mean()
return pinball
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
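# Sanity-check sketch (ours, not from the original file): with the default
# tau = 0.5 the pinball loss reduces exactly to half the mean absolute error,
# since max(0.5 * d, -0.5 * d) == 0.5 * |d|.
def _check_pinball_is_half_mae():
    loss = PinballLoss(tau=0.5)
    y, y_hat = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    assert torch.allclose(loss(y, y_hat), 0.5 * (y - y_hat).abs().mean())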
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_maximum_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = 0.5
tmp4 = tmp3 * tmp2
tmp5 = -0.5
tmp6 = tmp5 * tmp2
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_maximum_mean_mul_sub_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class PinballLossNew(nn.Module):
""" Pinball Loss
Computes the pinball loss between y and y_hat.
Parameters
----------
y: tensor
actual values in torch tensor.
y_hat: tensor (same shape as y)
predicted values in torch tensor.
tau: float, between 0 and 1
        the slope of the pinball loss; in the context of
quantile regression, the value of tau determines the
conditional quantile level.
Returns
----------
pinball_loss:
        average pinball loss for the predicted quantile
"""
def __init__(self, tau=0.5):
super(PinballLossNew, self).__init__()
self.tau = tau
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
venkatkorapaty/esrnn
|
PinballLoss
| false | 11,005 |
[
"MIT"
] | 0 |
411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
https://github.com/venkatkorapaty/esrnn/tree/411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
AdaptiveInstanceNormalization
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/uy/cuyl7xcjun4wsvs7ninygw4d3dhoz7p4pc6pohpbwomr2rgrbekq.py
# Topologically Sorted Source Nodes: [x, mul, x_1], Original ATen: [aten._native_batch_norm_legit, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul_1
# x => add, rsqrt, var_mean
# x_1 => add_1
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_mul_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp26 = tmp24 + tmp25
tmp27 = tmp23 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (16*x0)), tmp31, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, mul, x_1], Original ATen: [aten._native_batch_norm_legit, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0.run(buf4, primals_4, buf0, primals_2, buf1, buf5, 16, 16, grid=grid(16), stream=stream0)
del buf0
del primals_2
return (buf5, primals_3, primals_4, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class AdaptiveInstanceNormalization(nn.Module):
"""Some Information about AdaptiveInstanceNormalization"""
def __init__(self, channels, style_dim):
super(AdaptiveInstanceNormalization, self).__init__()
self.affine = nn.Linear(style_dim, channels * 2)
self.norm = nn.InstanceNorm2d(channels)
def forward(self, x, style):
scale, bias = self.affine(style).chunk(2, dim=1)
scale = scale.unsqueeze(2).unsqueeze(3)
bias = bias.unsqueeze(2).unsqueeze(3)
x = self.norm(x)
x = x * scale + bias
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'style_dim': 4}]
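# Usage sketch (ours): the affine layer maps an (N, style_dim) style vector to
# (N, C) scale and (N, C) bias, which are broadcast over the spatial dims, so
# styling is per sample and per channel and the output keeps the input shape.
def _adain_demo():
    adain = AdaptiveInstanceNormalization(channels=4, style_dim=4)
    x, style = torch.rand(4, 4, 4, 4), torch.rand(4, 4)
    assert adain(x, style).shape == x.shape  # (4, 4, 4, 4)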
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp24 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp26 = tmp24 + tmp25
tmp27 = tmp23 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4,
primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_4, buf1, buf4
class AdaptiveInstanceNormalizationNew(nn.Module):
"""Some Information about AdaptiveInstanceNormalization"""
def __init__(self, channels, style_dim):
super(AdaptiveInstanceNormalizationNew, self).__init__()
self.affine = nn.Linear(style_dim, channels * 2)
self.norm = nn.InstanceNorm2d(channels)
def forward(self, input_0, input_1):
primals_1 = self.affine.weight
primals_2 = self.affine.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
uthree/gan-image-generator
|
AdaptiveInstanceNormalization
| false | 11,006 |
[
"MIT"
] | 0 |
85585e389b5a494393da0789d82824f8c811e263
|
https://github.com/uthree/gan-image-generator/tree/85585e389b5a494393da0789d82824f8c811e263
|
upsampleBlock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/cg/ccgupjidh7vzwqy3i33hof3c5b5ytgxw3bw52fvz6g4mshaw5o5v.py
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 8
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*(x1 // 2)) + (16*(x0 % 2)) + (32*(x1 % 2)) + (64*x2) + (x0 // 2)), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 1, 8, 8), (64, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
def swish(x):
    return x * torch.sigmoid(x)
class upsampleBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(upsampleBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1)
self.shuffler = nn.PixelShuffle(2)
def forward(self, x):
return swish(self.shuffler(self.conv(x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
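# Shape sketch (ours): PixelShuffle(2) trades channel blocks for resolution
# (C -> C/4, H -> 2H, W -> 2W), so the (4, 4, 4, 4) test input becomes
# (4, 1, 8, 8) -- exactly the buf2 shape allocated in the compiled wrapper.
def _upsample_demo():
    block = upsampleBlock(in_channels=4, out_channels=4)
    assert block(torch.rand(4, 4, 4, 4)).shape == (4, 1, 8, 8)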
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x1 // 2) + 16 * (x0 % 2) + 32 * (x1 % 2) +
64 * x2 + x0 // 2), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 1, 8, 8), (64, 64, 8, 1), torch.float32)
        triton_poi_fused_mul_sigmoid_1[grid(256)](buf1, buf2, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1
def swish(x):
    return x * torch.sigmoid(x)
class upsampleBlockNew(nn.Module):
def __init__(self, in_channels, out_channels):
super(upsampleBlockNew, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1)
self.shuffler = nn.PixelShuffle(2)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
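# Equivalence sketch (ours; assumes a CUDA device): the compiled forward should
# reproduce the eager pipeline conv -> PixelShuffle -> swish on the same weights.
def _check_compiled_matches_eager():
    compiled = upsampleBlockNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = swish(compiled.shuffler(compiled.conv(x)))
    assert torch.allclose(eager, compiled(x), atol=1e-6)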
|
tomron27/srganus
|
upsampleBlock
| false | 11,007 |
[
"Apache-2.0"
] | 0 |
5dab73540535138375203bf31e31246cd203f3c0
|
https://github.com/tomron27/srganus/tree/5dab73540535138375203bf31e31246cd203f3c0
|
DisaggregatedPinballLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/5i/c5ibxuxwusa2zatlx57lacqjwkudxg7ycyl43pej6jltws4k4fp2.py
# Topologically Sorted Source Nodes: [delta_y, mul, mul_1, pinball, mean], Original ATen: [aten.sub, aten.mul, aten.maximum, aten.mean]
# Source node to ATen node mapping:
# delta_y => sub
# mean => mean
# mul => mul
# mul_1 => mul_1
# pinball => maximum
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (0.5, %sub), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (-0.5, %sub), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%maximum, [0]), kwargs = {})
triton_poi_fused_maximum_mean_mul_sub_0 = async_compile.triton('triton_poi_fused_maximum_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_maximum_mean_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_maximum_mean_mul_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp8 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp9 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp16 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp22 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp23 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = 0.5
tmp4 = tmp3 * tmp2
tmp5 = -0.5
tmp6 = tmp5 * tmp2
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp10 = tmp8 - tmp9
tmp11 = tmp3 * tmp10
tmp12 = tmp5 * tmp10
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp7 + tmp13
tmp17 = tmp15 - tmp16
tmp18 = tmp3 * tmp17
tmp19 = tmp5 * tmp17
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp14 + tmp20
tmp24 = tmp22 - tmp23
tmp25 = tmp3 * tmp24
tmp26 = tmp5 * tmp24
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp28 = tmp21 + tmp27
tmp29 = 4.0
tmp30 = tmp28 / tmp29
tl.store(out_ptr0 + (x0), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/oc/cocqkj2axh4xoigzadg3a7ugeyqt2npq3fq4w52skrbor3whkeci.py
# Topologically Sorted Source Nodes: [pinball_1], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# pinball_1 => mean_1
# Graph fragment:
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [1]), kwargs = {})
triton_poi_fused_mean_1 = async_compile.triton('triton_poi_fused_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [delta_y, mul, mul_1, pinball, mean], Original ATen: [aten.sub, aten.mul, aten.maximum, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_maximum_mean_mul_sub_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pinball_1], Original ATen: [aten.mean]
triton_poi_fused_mean_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0)
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class DisaggregatedPinballLoss(nn.Module):
""" Pinball Loss
Computes the pinball loss between y and y_hat.
Parameters
----------
y: tensor
actual values in torch tensor.
y_hat: tensor (same shape as y)
predicted values in torch tensor.
tau: float, between 0 and 1
        the slope of the pinball loss; in the context of
quantile regression, the value of tau determines the
conditional quantile level.
Returns
----------
pinball_loss:
        pinball loss for the predicted quantile, partially averaged (a tensor rather than a scalar)
"""
def __init__(self, tau=0.5):
super(DisaggregatedPinballLoss, self).__init__()
self.tau = tau
def forward(self, y, y_hat):
delta_y = torch.sub(y, y_hat)
        pinball = torch.max(torch.mul(self.tau, delta_y), torch.mul(self.tau - 1, delta_y))
pinball = pinball.mean(axis=0).mean(axis=1)
return pinball
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
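# Shape sketch (ours): the loss is only partially reduced -- mean over the
# batch axis, then over axis 1 of the remainder -- so a (4, 4, 4, 4) input
# pair yields a (4, 4) loss tensor instead of a scalar.
def _disaggregated_demo():
    loss = DisaggregatedPinballLoss(tau=0.5)
    out = loss(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4)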
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_maximum_mean_mul_sub_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp8 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp9 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp16 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp22 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp23 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = 0.5
tmp4 = tmp3 * tmp2
tmp5 = -0.5
tmp6 = tmp5 * tmp2
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp10 = tmp8 - tmp9
tmp11 = tmp3 * tmp10
tmp12 = tmp5 * tmp10
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp7 + tmp13
tmp17 = tmp15 - tmp16
tmp18 = tmp3 * tmp17
tmp19 = tmp5 * tmp17
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp14 + tmp20
tmp24 = tmp22 - tmp23
tmp25 = tmp3 * tmp24
tmp26 = tmp5 * tmp24
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp28 = tmp21 + tmp27
tmp29 = 4.0
tmp30 = tmp28 / tmp29
tl.store(out_ptr0 + x0, tmp30, xmask)
@triton.jit
def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_maximum_mean_mul_sub_0[grid(64)](arg1_1, arg0_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mean_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
return buf1,
class DisaggregatedPinballLossNew(nn.Module):
""" Pinball Loss
Computes the pinball loss between y and y_hat.
Parameters
----------
y: tensor
actual values in torch tensor.
y_hat: tensor (same shape as y)
predicted values in torch tensor.
tau: float, between 0 and 1
        the slope of the pinball loss; in the context of
quantile regression, the value of tau determines the
conditional quantile level.
Returns
----------
pinball_loss:
        pinball loss for the predicted quantile, partially averaged (a tensor rather than a scalar)
"""
def __init__(self, tau=0.5):
super(DisaggregatedPinballLossNew, self).__init__()
self.tau = tau
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
venkatkorapaty/esrnn
|
DisaggregatedPinballLoss
| false | 11,008 |
[
"MIT"
] | 0 |
411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
https://github.com/venkatkorapaty/esrnn/tree/411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
MegatronGelu
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/wz/cwzxjbdv6wlz2eluu4zsssyahxeyb63h7yyvazavdjht5bgqof5d.py
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, mul_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add]
# Source node to ATen node mapping:
# add => add
# erf => erf
# mul => mul
# mul_1 => mul_1
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, 1.41421), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
triton_poi_fused_add_div_erf_mul_0 = async_compile.triton('triton_poi_fused_add_div_erf_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071085623775818
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, mul_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_erf_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn
import torch.onnx
class MegatronGelu(torch.nn.Module):
def forward(self, x):
return x * 0.5 * (torch.erf(x / 1.41421) + 1.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
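# Numerical note (ours): 1.41421 is a truncated sqrt(2), so this matches the
# exact erf-based GELU up to that truncation; 1 / 1.41421 = 0.7071085623775818
# is the constant that reappears in the fused kernel above.
def _check_megatron_gelu():
    x = torch.rand(4, 4, 4, 4)
    ref = torch.nn.functional.gelu(x)  # erf-based GELU
    assert torch.allclose(MegatronGelu()(x), ref, atol=1e-4)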
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071085623775818
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_erf_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MegatronGeluNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
thilow/onnxruntime
|
MegatronGelu
| false | 11,009 |
[
"MIT"
] | 0 |
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
InteractiveKLLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/mc/cmc44gqwlbgitm3uqkuiwz6fe3jirwculg7zmyndeuqzyyqzyok7.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xg/cxg6geasclvgycjnyaybokxud5rdp2fe6eropfaplher4ysvlw4g.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 4), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/25/c252yxkeoy2jzoudseyd3vkmxj6p5ehtiqnttglx2n27knsfiyad.py
# Topologically Sorted Source Nodes: [softmax, kl_div, log_softmax], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.mean]
# Source node to ATen node mapping:
# kl_div => eq, full_default, full_default_1, isnan, log_1, mean, mul, mul_1, sub_3, where, where_1
# log_softmax => exp, log, sub_1, sum_1
# softmax => div_2, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_2 : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_2,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div_2, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %log_1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %sub_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {})
triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2 = async_compile.triton('triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (r3), None)
tmp18 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float("nan")
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = 256.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp37, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax, kl_div, log_softmax], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.mean]
triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2.run(buf4, buf0, buf2, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class InteractiveKLLoss(nn.Module):
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
self.kl_loss = nn.KLDivLoss()
def forward(self, student, teacher):
        return self.kl_loss(F.log_softmax(student / self.temperature, dim=1),
            F.softmax(teacher / self.temperature, dim=1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'temperature': 4}]
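# Hedged usage sketch (an illustrative addition, not part of the original
# source): shows how InteractiveKLLoss would typically be called with student
# and teacher logits shaped like get_inputs(); the names `student_logits` and
# `teacher_logits` are assumptions for illustration.
def example_interactive_kl_usage():
    loss_fn = InteractiveKLLoss(temperature=4)
    student_logits = torch.rand([4, 4, 4, 4])
    teacher_logits = torch.rand([4, 4, 4, 4])
    # Both softmax and log_softmax are taken over dim=1, matching the fused
    # Triton kernels above; the result is a scalar mean KL divergence.
    return loss_fn(student_logits, teacher_logits)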
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + r3, None)
tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = 256.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2[grid(1)](
buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
class InteractiveKLLossNew(nn.Module):
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
self.kl_loss = nn.KLDivLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
pkuyym/nni
|
InteractiveKLLoss
| false | 11,010 |
[
"MIT"
] | 0 |
fe533e3bc65ea27997e16250adb503638548d500
|
https://github.com/pkuyym/nni/tree/fe533e3bc65ea27997e16250adb503638548d500
|
LevelVariabilityLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ok/cok532ddqmz6anqcngyyyvukjpngrtca54uc3pkaq22qiywlnfqt.py
# Topologically Sorted Source Nodes: [diff, level_var_loss, mean, level_var_loss_1], Original ATen: [aten.sub, aten.pow, aten.mean, aten.mul]
# Source node to ATen node mapping:
# diff => sub_1
# level_var_loss => pow_1
# level_var_loss_1 => mul
# mean => mean
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_6, %slice_8), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 4), kwargs = {})
triton_per_fused_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 32
r1 = (rindex // 32)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 - tmp6
tmp8 = tmp4 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 128.0
tmp14 = tmp12 / tmp13
tmp15 = 4.0
tmp16 = tmp14 * tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [diff, level_var_loss, mean, level_var_loss_1], Original ATen: [aten.sub, aten.pow, aten.mean, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mul_pow_sub_0.run(buf1, arg0_1, 1, 128, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class LevelVariabilityLoss(nn.Module):
""" Level Variability Loss
Computes the variability penalty for the level.
Parameters
----------
levels: tensor with shape (batch, n_time)
levels obtained from exponential smoothing component of ESRNN
level_variability_penalty: float
        this parameter controls the strength of the penalty
        on the wiggliness of the level vector, inducing smoothness
        in the output
Returns
----------
level_var_loss:
wiggliness loss for the level vector
"""
def __init__(self, level_variability_penalty):
super(LevelVariabilityLoss, self).__init__()
self.level_variability_penalty = level_variability_penalty
def forward(self, levels):
assert levels.shape[1] > 2
level_prev = torch.log(levels[:, :-1])
level_next = torch.log(levels[:, 1:])
log_diff_of_levels = torch.sub(level_prev, level_next)
log_diff_prev = log_diff_of_levels[:, :-1]
log_diff_next = log_diff_of_levels[:, 1:]
diff = torch.sub(log_diff_prev, log_diff_next)
level_var_loss = diff ** 2
level_var_loss = level_var_loss.mean() * self.level_variability_penalty
return level_var_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'level_variability_penalty': 4}]
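# Hedged usage sketch (an illustrative addition, not part of the original
# source): a geometric level series has constant log-differences, so its
# second differences vanish and the penalty is ~0, while a noisy series pays.
def example_level_variability_usage():
    loss_fn = LevelVariabilityLoss(level_variability_penalty=4)
    smooth_levels = torch.exp(torch.arange(8, dtype=torch.float32)).unsqueeze(0)
    noisy_levels = smooth_levels * (1.0 + 0.5 * torch.rand(1, 8))
    # smooth -> tensor(0.), noisy -> some positive penalty
    return loss_fn(smooth_levels), loss_fn(noisy_levels)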
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 32
r1 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 - tmp6
tmp8 = tmp4 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 128.0
tmp14 = tmp12 / tmp13
tmp15 = 4.0
tmp16 = tmp14 * tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1, 1, 128,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class LevelVariabilityLossNew(nn.Module):
""" Level Variability Loss
Computes the variability penalty for the level.
Parameters
----------
levels: tensor with shape (batch, n_time)
levels obtained from exponential smoothing component of ESRNN
level_variability_penalty: float
        this parameter controls the strength of the penalty
        on the wiggliness of the level vector, inducing smoothness
        in the output
Returns
----------
level_var_loss:
wiggliness loss for the level vector
"""
def __init__(self, level_variability_penalty):
super(LevelVariabilityLossNew, self).__init__()
self.level_variability_penalty = level_variability_penalty
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
venkatkorapaty/esrnn
|
LevelVariabilityLoss
| false | 11,011 |
[
"MIT"
] | 0 |
411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
https://github.com/venkatkorapaty/esrnn/tree/411d3191e7e12f29e521e06bc18f9b9b0fdf0f0c
|
L1ExactPenaltyConstraintLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ub/cubaqw7n5fcx2yskdakyray3737itlsr3yy5hjcyujinajd7myq2.py
# Topologically Sorted Source Nodes: [gap_constraint, norm], Original ATen: [aten.relu, aten.linalg_vector_norm]
# Source node to ATen node mapping:
# gap_constraint => relu
# norm => abs_1, pow_2, sum_1
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%relu,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, None), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 1.0), kwargs = {})
triton_per_fused_linalg_vector_norm_relu_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_relu_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [gap_constraint, norm], Original ATen: [aten.relu, aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_relu_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
from torch.nn import functional as F
class L1ExactPenaltyConstraintLoss(nn.Module):
def __init__(self):
super(L1ExactPenaltyConstraintLoss, self).__init__()
def forward(self, x):
gap_constraint = F.relu(x)
return torch.norm(gap_constraint, p=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
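# Hedged usage sketch (an illustrative addition): the loss is the L1 norm of
# the positive part of x, i.e. relu(x).abs().sum(); negative (satisfied)
# constraint gaps contribute nothing.
def example_l1_penalty_usage():
    loss_fn = L1ExactPenaltyConstraintLoss()
    gap = torch.tensor([-1.0, 0.0, 2.0, 3.0])
    return loss_fn(gap)  # relu -> [0, 0, 2, 3], so the penalty is 5.0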
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_relu_0(in_out_ptr0, in_ptr0, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_relu_0[grid(1)](buf1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class L1ExactPenaltyConstraintLossNew(nn.Module):
def __init__(self):
super(L1ExactPenaltyConstraintLossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ykt345/fairtorch
|
L1ExactPenaltyConstraintLoss
| false | 11,012 |
[
"MIT"
] | 0 |
fe7e0cfaec3de0fc2b9c92943bb02639acd46bb4
|
https://github.com/ykt345/fairtorch/tree/fe7e0cfaec3de0fc2b9c92943bb02639acd46bb4
|
L2PenaltyConstraintLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/cf/ccfxnxqg4a6oao542iox32prxejyezlv3dkg257c4mnoade4cvaf.py
# Topologically Sorted Source Nodes: [gap_constraint, norm], Original ATen: [aten.relu, aten.linalg_vector_norm]
# Source node to ATen node mapping:
# gap_constraint => relu
# norm => pow_1, pow_2, sum_1
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_per_fused_linalg_vector_norm_relu_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_relu_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = libdevice.sqrt(tmp6)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp7, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [gap_constraint, norm], Original ATen: [aten.relu, aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_relu_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
from torch.nn import functional as F
class L2PenaltyConstraintLoss(nn.Module):
def __init__(self):
super(L2PenaltyConstraintLoss, self).__init__()
def forward(self, x):
gap_constraint = F.relu(x)
return torch.norm(gap_constraint, p=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
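# Hedged usage sketch (an illustrative addition): identical in spirit to the
# L1 variant above, but the penalty is the Euclidean norm of the violations.
def example_l2_penalty_usage():
    loss_fn = L2PenaltyConstraintLoss()
    gap = torch.tensor([-1.0, 0.0, 3.0, 4.0])
    return loss_fn(gap)  # relu -> [0, 0, 3, 4], so the penalty is 5.0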
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_relu_0(in_out_ptr0, in_ptr0, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = libdevice.sqrt(tmp6)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_relu_0[grid(1)](buf1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class L2PenaltyConstraintLossNew(nn.Module):
def __init__(self):
super(L2PenaltyConstraintLossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ykt345/fairtorch
|
L2PenaltyConstraintLoss
| false | 11,013 |
[
"MIT"
] | 0 |
fe7e0cfaec3de0fc2b9c92943bb02639acd46bb4
|
https://github.com/ykt345/fairtorch/tree/fe7e0cfaec3de0fc2b9c92943bb02639acd46bb4
|
MegatronFastGelu
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ji/cji2qjd2s4notypk235c2icz2ebwl7bakixf7bgziian5ft7m2cx.py
# Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, add, mul_4, tanh, add_1, mul_5], Original ATen: [aten.mul, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# tanh => tanh
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.7978845608028654), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.044715), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %arg0_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %add), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_4,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_mul_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7978845608028654
tmp4 = tmp0 * tmp3
tmp5 = 0.044715
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp0
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = tmp11 + tmp8
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, add, mul_4, tanh, add_1, mul_5], Original ATen: [aten.mul, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn
import torch.onnx
class MegatronFastGelu(torch.nn.Module):
def forward(self, x):
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 +
0.044715 * x * x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
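# Hedged verification sketch (an illustrative addition): the expression above
# is the standard tanh fast-GELU approximation, so it should track torch's
# exact GELU closely; the tolerance noted below is an assumption, not a spec.
def example_fast_gelu_check():
    x = torch.linspace(-3.0, 3.0, steps=101)
    fast = MegatronFastGelu()(x)
    exact = torch.nn.functional.gelu(x)
    # The max absolute error of the tanh approximation is small (~1e-3 or less).
    return (fast - exact).abs().max()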
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7978845608028654
tmp4 = tmp0 * tmp3
tmp5 = 0.044715
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp0
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = tmp11 + tmp8
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_tanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MegatronFastGeluNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
thilow/onnxruntime
|
MegatronFastGelu
| false | 11,014 |
[
"MIT"
] | 0 |
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
UpsampleBLock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/p3/cp32zuxrp2cknaaat4l46gcxlkrjzggsmqqhfyznul7wqfb4ebec.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/gv/cgvzuomkau6pryvjxolqjxpfa3tqszfhfsuflbdz2eltegfziozj.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._prelu_kernel]
# Source node to ATen node mapping:
# x_2 => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %view_1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused__prelu_kernel_1 = async_compile.triton('triton_poi_fused__prelu_kernel_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 8
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*(x1 // 2)) + (16*(x0 % 2)) + (32*(x1 % 2)) + (64*x2) + (x0 // 2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 1024, grid=grid(1024), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._prelu_kernel]
triton_poi_fused__prelu_kernel_1.run(buf1, primals_4, buf2, 1024, grid=grid(1024), stream=stream0)
return (buf2, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.utils.data
class UpsampleBLock(nn.Module):
def __init__(self, in_channels):
super(UpsampleBLock, self).__init__()
self.conv = nn.Conv2d(in_channels, in_channels * 2 ** 2,
kernel_size=3, padding=1)
self.pixel_shuffle = nn.PixelShuffle(2)
self.prelu = nn.PReLU()
def forward(self, x):
x = self.conv(x)
x = self.pixel_shuffle(x)
x = self.prelu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
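# Note: the 3x3 conv widens channels by the upscale factor squared
# (2 ** 2 == 4); nn.PixelShuffle(2) then trades those extra channels for a
# 2x larger height and width, restoring the original channel count.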
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x1 // 2) + 16 * (x0 % 2) + 32 * (x1 % 2) +
64 * x2 + x0 // 2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x3, tmp6, xmask)
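# The load offset above fuses nn.PixelShuffle(2) into the PReLU read: for an
# output element (n, c, h, w) of the (4, 4, 8, 8) result, the source value
# sits in the (4, 16, 4, 4) conv output at channel 4*c + 2*(h % 2) + (w % 2)
# and spatial position (h // 2, w // 2). With x0 = w, x1 = h and x2 = 4*n + c,
# that flat offset works out to 64*x2 + 32*(x1 % 2) + 16*(x0 % 2) +
# 4*(x1 // 2) + x0 // 2, which is exactly the expression in the tl.load.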
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = buf0
del buf0
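        # The bare get_raw_stream(0) call below is a vestige of the original
        # 'stream0 = get_raw_stream(0)'; the kernel launches here run on the
        # current CUDA stream.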
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1024)](buf1, primals_2, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
triton_poi_fused__prelu_kernel_1[grid(1024)](buf1, primals_4, buf2,
1024, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, primals_4, buf1
class UpsampleBLockNew(nn.Module):
def __init__(self, in_channels):
super(UpsampleBLockNew, self).__init__()
self.conv = nn.Conv2d(in_channels, in_channels * 2 ** 2,
kernel_size=3, padding=1)
self.pixel_shuffle = nn.PixelShuffle(2)
self.prelu = nn.PReLU()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.prelu.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
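# A minimal parity sketch (illustrative, assumes a CUDA device is available):
# the compiled forward should match composing the wrapper's eager submodules.
#
#   m = UpsampleBLockNew(4).cuda()
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   ref = m.prelu(m.pixel_shuffle(m.conv(x)))
#   assert torch.allclose(m(x), ref, atol=1e-6)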
|
tomron27/srganus
|
UpsampleBLock
| false | 11,015 |
[
"Apache-2.0"
] | 0 |
5dab73540535138375203bf31e31246cd203f3c0
|
https://github.com/tomron27/srganus/tree/5dab73540535138375203bf31e31246cd203f3c0
|
HuggingfaceFastGelu
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/cq/ccqrv5daquzjwissidd23uotfpy5s5jx7uitdgmgmdd6b44oid4b.py
# Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, add, mul_4, tanh, add_1, mul_5], Original ATen: [aten.mul, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# tanh => tanh
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.7978845608), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.044715), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %arg0_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %add), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_4,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_mul_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7978845608
tmp4 = tmp0 * tmp3
tmp5 = 0.044715
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp0
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = tmp11 + tmp8
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1, mul_2, mul_3, add, mul_4, tanh, add_1, mul_5], Original ATen: [aten.mul, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn
import torch.onnx
class HuggingfaceFastGelu(torch.nn.Module):
def forward(self, x):
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 +
0.044715 * x * x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
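# Note: 0.7978845608 approximates sqrt(2 / pi), so this module is the standard
# tanh GELU approximation. A quick parity sketch (assumes PyTorch >= 1.12 for
# the `approximate` keyword on F.gelu):
#
#   import torch.nn.functional as F
#   x = torch.rand(4, 4, 4, 4)
#   y = HuggingfaceFastGelu()(x)
#   assert torch.allclose(y, F.gelu(x, approximate='tanh'), atol=1e-6)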
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7978845608
tmp4 = tmp0 * tmp3
tmp5 = 0.044715
tmp6 = tmp0 * tmp5
tmp7 = tmp6 * tmp0
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = tmp11 + tmp8
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
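# All nine traced ATen ops (the muls, adds and tanh) fuse into this single
# pointwise kernel, so each element costs one global load and one global store.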
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_tanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HuggingfaceFastGeluNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
thilow/onnxruntime
|
HuggingfaceFastGelu
| false | 11,016 |
[
"MIT"
] | 0 |
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
NeuralNetNonDifferentiableOutput
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/es/cesdgivumahwau3ktpnkfn4fdmvsesqfozw5gmx5z6jgpd2mkntn.py
# Topologically Sorted Source Nodes: [out1, mask1, mask1_1], Original ATen: [aten.relu, aten.gt, aten._to_copy]
# Source node to ATen node mapping:
# mask1 => gt
# mask1_1 => convert_element_type
# out1 => relu
# Graph fragment:
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%relu, 0.01), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.int64), kwargs = {})
triton_poi_fused__to_copy_gt_relu_0 = async_compile.triton('triton_poi_fused__to_copy_gt_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_gt_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_gt_relu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.01
tmp6 = tmp4 > tmp5
tmp7 = tmp6.to(tl.int64)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/db/cdbnihrylojvzpxqsddewcmpwhm3r56pohj377wckbcmu6m4bpxc.py
# Topologically Sorted Source Nodes: [mask2, mask2_1], Original ATen: [aten.lt, aten._to_copy]
# Source node to ATen node mapping:
# mask2 => lt
# mask2_1 => convert_element_type_1
# Graph fragment:
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%view_3, 0.02), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt, torch.int64), kwargs = {})
triton_poi_fused__to_copy_lt_1 = async_compile.triton('triton_poi_fused__to_copy_lt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_lt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_lt_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.02
tmp2 = tmp0 < tmp1
tmp3 = tmp2.to(tl.int64)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [out1, mask1, mask1_1], Original ATen: [aten.relu, aten.gt, aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_gt_relu_0.run(buf1, primals_2, buf3, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [mask2, mask2_1], Original ATen: [aten.lt, aten._to_copy]
triton_poi_fused__to_copy_lt_1.run(buf2, buf4, 256, grid=grid(256), stream=stream0)
return (buf1, buf3, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn
import torch.onnx
class NeuralNetNonDifferentiableOutput(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetNonDifferentiableOutput, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1):
out = self.fc1(input1)
out1 = self.relu(out)
out2 = self.fc2(out1)
mask1 = torch.gt(out1, 0.01)
mask1 = mask1.long()
mask2 = torch.lt(out2, 0.02)
mask2 = mask2.long()
return out1, mask1, out2, mask2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
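# Note: the .long() casts make mask1/mask2 integer tensors, which autograd
# cannot differentiate through, so only out1 and out2 carry gradients. A quick
# illustration (a sketch, not part of the original module):
#
#   net = NeuralNetNonDifferentiableOutput(4, 4, 4)
#   out1, mask1, out2, mask2 = net(torch.rand(4, 4, 4, 4))
#   assert out1.requires_grad and out2.requires_grad
#   assert not (mask1.requires_grad or mask2.requires_grad)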
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__to_copy_gt_relu_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.01
tmp6 = tmp4 > tmp5
tmp7 = tmp6.to(tl.int64)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__to_copy_lt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.02
tmp2 = tmp0 < tmp1
tmp3 = tmp2.to(tl.int64)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_gt_relu_0[grid(256)](buf1, primals_2,
buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
        triton_poi_fused__to_copy_lt_1[grid(256)](buf2, buf4, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
    return (buf1, buf3, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4)
class NeuralNetNonDifferentiableOutputNew(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetNonDifferentiableOutputNew, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2], output[3]
|
thilow/onnxruntime
|
NeuralNetNonDifferentiableOutput
| false | 11,017 |
[
"MIT"
] | 0 |
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
TemperatureHolder
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/xp/cxpd2bcgjjguvsbnowhprcscim22tcfdskii4vimpwk2r73oeben.py
# Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp]
# Source node to ATen node mapping:
# exp => exp
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_exp_0 = async_compile.triton('triton_poi_fused_exp_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl_math.exp(tmp1)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_exp_0.run(primals_1, buf0, 1, grid=grid(1), stream=stream0)
del primals_1
return (buf0, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class TemperatureHolder(nn.Module):
"""Module that holds a temperature as a learnable value.
Args:
initial_log_temperature (float): Initial value of log(temperature).
"""
def __init__(self, initial_log_temperature=0):
super().__init__()
self.log_temperature = nn.Parameter(torch.tensor(
initial_log_temperature, dtype=torch.float32))
def forward(self):
"""Return a temperature as a torch.Tensor."""
return torch.exp(self.log_temperature)
def get_inputs():
return []
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
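    # xnumel == 1 for a 0-d tensor, so no mask is needed; the two bare
    # expressions below are vestigial index/mask computations left behind as
    # no-op statements by dead-code stripping.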
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl_math.exp(tmp1)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp2, None)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_0[grid(1)](primals_1, buf0, 1, XBLOCK=1,
num_warps=1, num_stages=1)
del primals_1
return buf0, buf0
class TemperatureHolderNew(nn.Module):
"""Module that holds a temperature as a learnable value.
Args:
initial_log_temperature (float): Initial value of log(temperature).
"""
def __init__(self, initial_log_temperature=0):
super().__init__()
self.log_temperature = nn.Parameter(torch.tensor(
initial_log_temperature, dtype=torch.float32))
def forward(self):
primals_1 = self.log_temperature
output = call([primals_1])
return output[0]
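# Usage sketch (illustrative, assumes a CUDA device): the module takes no
# inputs and returns exp(log_temperature) as a 0-d tensor.
#
#   holder = TemperatureHolderNew().cuda()
#   assert torch.isclose(holder(), holder.log_temperature.detach().exp())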
|
tarokiritani/pfrl
|
TemperatureHolder
| false | 11,018 |
[
"MIT"
] | 0 |
284ed1f43b32654a2ec1569b16a0f6b9acbd5e79
|
https://github.com/tarokiritani/pfrl/tree/284ed1f43b32654a2ec1569b16a0f6b9acbd5e79
|
NeuralNetMultiplePositionalArguments
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/zi/czioyfiql36jvbru3amu3iggyuvnn5c4pypwuaiss36muc2jqtqb.py
# Topologically Sorted Source Nodes: [model_input], Original ATen: [aten.add]
# Source node to ATen node mapping:
# model_input => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [model_input], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_4, buf4, 256, grid=grid(256), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_5, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn
import torch.onnx
class NeuralNetMultiplePositionalArguments(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArguments, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1, input2):
model_input = input1 + input2
out = self.fc1(model_input)
out = self.relu(out)
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf2,
primals_4, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
    return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_5, buf4)
class NeuralNetMultiplePositionalArgumentsNew(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsNew, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
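# Note: call() returns the model output first, followed by tensors AOTAutograd
# saves for the backward pass (the flattened model input, the post-ReLU
# activation, fc2's weight, and the ReLU mask); the wrapper keeps output[0].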
|
thilow/onnxruntime
|
NeuralNetMultiplePositionalArguments
| false | 11,019 |
[
"MIT"
] | 0 |
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/zi/czioyfiql36jvbru3amu3iggyuvnn5c4pypwuaiss36muc2jqtqb.py
# Topologically Sorted Source Nodes: [model_input], Original ATen: [aten.add]
# Source node to ATen node mapping:
# model_input => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py
# Topologically Sorted Source Nodes: [out1_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out1_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [model_input], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out1_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_4, buf6, 256, grid=grid(256), stream=stream0)
del primals_4
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out2_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf4, primals_6, buf5, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf3, buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf5, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn
import torch.onnx
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency(torch.nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency,
              self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.fc2 = torch.nn.Linear(input_size, hidden_size)
self.relu1 = torch.nn.ReLU()
self.relu2 = torch.nn.ReLU()
def forward(self, input1, input2):
model_input = input1 + input2
out1 = self.fc1(model_input)
out2 = self.fc2(model_input)
out1 = self.relu1(out1)
out2 = self.relu2(out2)
return out1, out2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
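# Hypothetical usage sketch (not part of the original source): wire the
# helpers above together to run the eager module once on CPU.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    model = NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency(
        *init_args, **init_kwargs)
    out1, out2 = model(*get_inputs())
    print(out1.shape, out2.shape)  # two independent torch.Size([4, 4, 4, 4]) outputs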
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3,
primals_4, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf4,
primals_6, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
return buf3, buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf5, buf6
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew(torch.nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew,
              self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.fc2 = torch.nn.Linear(input_size, hidden_size)
self.relu1 = torch.nn.ReLU()
self.relu2 = torch.nn.ReLU()
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
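# Hypothetical smoke test (not part of the original source; assumes a CUDA
# device): the compiled wrapper takes the same inputs as the eager module.
if __name__ == '__main__' and torch.cuda.is_available():
    model = NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew(4, 4, 4).cuda()
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    out1, out2 = model(a, b)
    assert out1.shape == out2.shape == (4, 4, 4, 4)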
|
repo_name: thilow/onnxruntime
module_name: NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency
synthetic: false
uuid: 11020
licenses: ["MIT"]
stars: 0
sha: 1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
repo_link: https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
FeedForwardLayer
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/v7/cv7zazascu4rpkkwoxbiwk6c2le2e6wshdhae73bmaoapelvwguv.py
# Topologically Sorted Source Nodes: [hidden_states_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hidden_states_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [hidden_states_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 1024, grid=grid(1024), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf2)
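        # addmm folds the second Linear's bias into the GEMM epilogue:
        # out = beta * bias + alpha * (hidden @ weight_t), with alpha = beta = 1.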
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class FeedForwardLayer(nn.Module):
def __init__(self, hidden_size):
super(FeedForwardLayer, self).__init__()
self.linear_1 = nn.Linear(hidden_size, 4 * hidden_size)
self.linear_2 = nn.Linear(4 * hidden_size, hidden_size)
self.relu = nn.ReLU()
def forward(self, hidden_states):
hidden_states = self.linear_1(hidden_states)
hidden_states = self.relu(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
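# Hypothetical usage sketch (not part of the original source): hidden_size=4
# expands to 16 inside the block and projects back, so shapes round-trip.
if __name__ == '__main__':
    layer = FeedForwardLayer(hidden_size=4)
    out = layer(torch.rand([4, 4, 4, 4]))
    print(out.shape)  # torch.Size([4, 4, 4, 4])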
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1,
primals_2, buf3, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf2)
del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
            reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, buf3)
class FeedForwardLayerNew(nn.Module):
def __init__(self, hidden_size):
super(FeedForwardLayerNew, self).__init__()
self.linear_1 = nn.Linear(hidden_size, 4 * hidden_size)
self.linear_2 = nn.Linear(4 * hidden_size, hidden_size)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.linear_1.weight
primals_2 = self.linear_1.bias
primals_4 = self.linear_2.weight
primals_5 = self.linear_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
repo_name: yongho94/Transformers_NMT
module_name: FeedForwardLayer
synthetic: false
uuid: 11021
licenses: ["MIT"]
stars: 0
sha: 14fb08a6b1391da4d49f199dc16d7beb37620c98
repo_link: https://github.com/yongho94/Transformers_NMT/tree/14fb08a6b1391da4d49f199dc16d7beb37620c98
|
NeuralNetPartialNoGradModel
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/au/cau6qypw2vz4drppp6yr6chutchyhnniousxhhlq2y5r3yu3gep5.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
del primals_3
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn
import torch.onnx
class NeuralNetPartialNoGradModel(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetPartialNoGradModel, self).__init__()
        self.fc1 = torch.nn.Linear(input_size, hidden_size).requires_grad_(False)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, model_input):
out = self.relu(self.fc1(model_input))
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
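# Hypothetical usage sketch (not part of the original source): fc1 is frozen
# via requires_grad_(False), so after backward only fc2 holds gradients.
if __name__ == '__main__':
    model = NeuralNetPartialNoGradModel(input_size=4, hidden_size=4, num_classes=4)
    model(torch.rand([4, 4, 4, 4])).sum().backward()
    assert model.fc1.weight.grad is None
    assert model.fc2.weight.grad is not None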
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
del primals_3
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
            reinterpret_tensor(buf1, (64, 4), (4, 1), 0))
class NeuralNetPartialNoGradModelNew(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetPartialNoGradModelNew, self).__init__()
        self.fc1 = torch.nn.Linear(input_size, hidden_size).requires_grad_(False)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
repo_name: thilow/onnxruntime
module_name: NeuralNetPartialNoGradModel
synthetic: false
uuid: 11022
licenses: ["MIT"]
stars: 0
sha: 1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
repo_link: https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
ModMSELoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/56/c56fo5gmu7rkmdaeifbzyf65qov4626hcg5r3drzzcliykxm3zen.py
# Topologically Sorted Source Nodes: [max_1, max_2], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => max_1
# max_2 => max_2
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg1_1, 2), kwargs = {})
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%getitem, 2), kwargs = {})
triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
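    # Each of the 16 program elements reduces one 4x4 spatial slab: the loads
    # below walk in_ptr0[x0, :, :] and the maximum() chain fuses the two
    # successive max(dim=2) reductions from the source into a single pass.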
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = triton_helpers.maximum(tmp6, tmp13)
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = triton_helpers.maximum(tmp14, tmp21)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp29 = triton_helpers.maximum(tmp27, tmp28)
tmp30 = triton_helpers.maximum(tmp22, tmp29)
tl.store(out_ptr0 + (x0), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/en/cenud3rzz2lu4hsmdulpiwnbmbsyucrkdb7nvsyka6druxyi6bsv.py
# Topologically Sorted Source Nodes: [truediv, sub_1, pow_2, sub_2, add, truediv_1, mean, sub, pow_1, reg, sum_1, loss], Original ATen: [aten.div, aten.sub, aten.pow, aten.rsub, aten.add, aten.mean, aten.mul, aten.sum]
# Source node to ATen node mapping:
# add => add
# loss => add_1
# mean => mean
# pow_1 => pow_1
# pow_2 => pow_2
# reg => mul
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sum_1 => sum_1
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %expand), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %arg2_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg2_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 0.1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, %add), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.0625), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %sum_1), kwargs = {})
triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (r2), None)
tmp14 = tl.load(in_ptr3 + (r2), None)
tmp2 = tmp0 / tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp3
tmp8 = 0.1
tmp9 = tmp7 + tmp8
tmp10 = tmp5 / tmp9
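    # tmp10 is the per-element weighted error
    # (output / output_max - label)**2 / (1 - label + 0.1); the tl.sum
    # reductions below accumulate its mean and the prior regularizer.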
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp15 = tmp6 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = 0.0625
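    # 0.0625 is the constant-folded 1 / (prior.shape[0] * prior.shape[1])
    # for the (4, 4, 4, 4) inputs this kernel was specialized to.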
tmp18 = tmp16 * tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp13 / tmp22
tmp24 = tmp23 + tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp24, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, max_2], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_max_0.run(arg1_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [truediv, sub_1, pow_2, sub_2, add, truediv_1, mean, sub, pow_1, reg, sum_1, loss], Original ATen: [aten.div, aten.sub, aten.pow, aten.rsub, aten.add, aten.mean, aten.mul, aten.sum]
triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1.run(buf3, arg1_1, buf0, arg2_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
class ModMSELoss(torch.nn.Module):
def __init__(self, shape_r_gt, shape_c_gt):
super(ModMSELoss, self).__init__()
self.shape_r_gt = shape_r_gt
self.shape_c_gt = shape_c_gt
def forward(self, output, label, prior):
prior_size = prior.shape
        output_max = torch.max(torch.max(output, 2)[0], 2)[0].unsqueeze(2).unsqueeze(2).expand(
            output.shape[0], output.shape[1], self.shape_r_gt, self.shape_c_gt)
        reg = 1.0 / (prior_size[0] * prior_size[1]) * (1 - prior) ** 2
        loss = torch.mean((output / output_max - label) ** 2 /
            (1 - label + 0.1)) + torch.sum(reg)
return loss
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'shape_r_gt': 4, 'shape_c_gt': 4}]
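# Hypothetical usage sketch (not part of the original source): the criterion
# returns a scalar combining the normalized MSE term and the prior penalty.
if __name__ == '__main__':
    criterion = ModMSELoss(shape_r_gt=4, shape_c_gt=4)
    output, label, prior = get_inputs()
    print(criterion(output, label, prior))  # 0-dim tensor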
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = triton_helpers.maximum(tmp6, tmp13)
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = triton_helpers.maximum(tmp14, tmp21)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp29 = triton_helpers.maximum(tmp27, tmp28)
tmp30 = triton_helpers.maximum(tmp22, tmp29)
tl.store(out_ptr0 + x0, tmp30, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + r2, None)
tmp14 = tl.load(in_ptr3 + r2, None)
tmp2 = tmp0 / tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp3
tmp8 = 0.1
tmp9 = tmp7 + tmp8
tmp10 = tmp5 / tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp15 = tmp6 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = 0.0625
tmp18 = tmp16 * tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp13 / tmp22
tmp24 = tmp23 + tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(16)](arg1_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
triton_per_fused_add_div_mean_mul_pow_rsub_sub_sum_1[grid(1)](buf3,
arg1_1, buf0, arg2_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del buf0
return buf3,
class ModMSELossNew(torch.nn.Module):
def __init__(self, shape_r_gt, shape_c_gt):
super(ModMSELossNew, self).__init__()
self.shape_r_gt = shape_r_gt
self.shape_c_gt = shape_c_gt
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
repo_name: yyuting/learning_from_program_trace
module_name: ModMSELoss
synthetic: false
uuid: 11023
licenses: ["MIT"]
stars: 0
sha: e0e4ac9bc2d4069eef64bdc2de64a87a735fa508
repo_link: https://github.com/yyuting/learning_from_program_trace/tree/e0e4ac9bc2d4069eef64bdc2de64a87a735fa508
|
PositionWiseFeedForward
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ck/cck6zsxedo53nyj2po2pvkfjvrr75ansuu3rjjhu6zyrx6xzssqo.py
# Topologically Sorted Source Nodes: [n2], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# n2 => expm1, gt, mul, mul_2, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
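    # ELU with alpha = 1.0: identity for x > 0, expm1(x) otherwise; the
    # multiplications by 1.0 are the traced alpha/scale constants.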
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/en/cenabni3h77zazokaukc6skf7kuv4ta2awact6sfvxbq7che2ucu.py
# Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# grn => var_mean
# sigmoid => sigmoid
# x_1 => mul_3
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_7), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_3), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_mul_native_layer_norm_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp8 = tl.sigmoid(tmp7)
tmp10 = tmp8 * tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp15 = tl.sigmoid(tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp22 = tl.sigmoid(tmp21)
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
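    # tmp28 / tmp40 are the mean and biased variance over the last dim
    # (size 4), computed with fully unrolled loads for the layer norm.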
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uj/cuj7uutcqmly235uixsc4lxbmmowhsqrjmbzktxbwhqmb7hgix5q.py
# Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# grn => add_1, add_2, mul_4, mul_5, rsqrt, sub
# sigmoid => sigmoid
# x_1 => mul_3
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_7), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_10), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_11), kwargs = {})
triton_poi_fused_add_mul_native_layer_norm_sigmoid_2 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/sa/csam4qbluetpmxn3expxgvgvulpbjidzlkrgpt7lcqb7aa4p6d5x.py
# Topologically Sorted Source Nodes: [sigmoid_1, x_3, add_1, out], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add_1 => add_3
# out => var_mean_1
# sigmoid_1 => sigmoid_1
# x_3 => mul_6
# Graph fragment:
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_9,), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %view_11), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_16), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [3]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_mul_native_layer_norm_sigmoid_3 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tmp7 = tl.sigmoid(tmp6)
tmp9 = tmp7 * tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp14 = tl.sigmoid(tmp13)
tmp16 = tmp14 * tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp21 = tl.sigmoid(tmp20)
tmp23 = tmp21 * tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
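# The kernel above computes the statistics for the second LayerNorm: for each
# row of 4 elements of x = sigmoid(a) * b + skip (a and b being the two GLU
# projections), it stores the mean (tmp28) and the biased variance with
# correction=0 (tmp40).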
# kernel path: runs/run_shard_8/inductor_cache/bw/cbw3jqoeotoyoa3d4nplpzdidkkeatsdk7ho5hysa2lq3gbdtqjv.py
# Topologically Sorted Source Nodes: [sigmoid_1, x_3, add_1, out], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add_1 => add_3
# out => add_4, add_5, mul_7, mul_8, rsqrt_1, sub_1
# sigmoid_1 => sigmoid_1
# x_3 => mul_6
# Graph fragment:
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_9,), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %view_11), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_16), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %primals_17), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, %primals_18), kwargs = {})
triton_poi_fused_add_mul_native_layer_norm_sigmoid_4 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_sigmoid_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
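# The kernel above mirrors kernel _2 for the second LayerNorm: it recomputes
# x = sigmoid(a) * b + skip and applies
# (x - mean) * rsqrt(var + 1e-05) * gamma + beta with the statistics from
# kernel _3.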
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [n2], Original ATen: [aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_elu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [n1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_9
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
triton_poi_fused_add_mul_native_layer_norm_sigmoid_1.run(primals_3, buf3, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, x_1, add, grn], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
triton_poi_fused_add_mul_native_layer_norm_sigmoid_2.run(primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7, 256, grid=grid(256), stream=stream0)
del primals_11
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_13
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_15
buf10 = buf6; del buf6 # reuse
buf11 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [sigmoid_1, x_3, add_1, out], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
triton_poi_fused_add_mul_native_layer_norm_sigmoid_3.run(buf8, buf9, primals_16, buf10, buf11, 64, grid=grid(64), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_1, x_3, add_1, out], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.native_layer_norm]
triton_poi_fused_add_mul_native_layer_norm_sigmoid_4.run(buf8, buf9, primals_16, buf10, buf11, primals_17, primals_18, buf12, 256, grid=grid(256), stream=stream0)
del buf10
del buf11
del primals_18
return (buf12, primals_3, primals_10, primals_16, primals_17, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, buf3, buf4, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf8, buf9, primals_14, primals_12, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
class GatedLinearUnit(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, x):
x = self.dropout(x)
x = self.act(self.w4(x)) * self.w5(x)
return x
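# GatedLinearUnit above implements GLU gating: sigmoid(w4(x)) * w5(x), with
# dropout applied to x first.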
class GateAddNorm(nn.Module):
def __init__(self, input_size, output_size, dropout):
super().__init__()
self.glu = GatedLinearUnit(input_size, output_size, dropout)
self.norm = nn.LayerNorm(output_size)
def forward(self, x, skip):
return self.norm(self.glu(x) + skip)
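# GateAddNorm above is the standard gate -> add -> norm block:
# LayerNorm(GLU(x) + skip).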
class GatedResidualNetwork(nn.Module):
    def __init__(self, input_size, hidden_size, output_size,
            context_size=None, dropout=0):
super().__init__()
self.w1 = nn.Linear(hidden_size, hidden_size)
self.w2 = nn.Linear(input_size, hidden_size)
self.w3 = None if context_size is None else nn.Linear(context_size,
hidden_size, bias=False)
self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
self.layer_norm = nn.LayerNorm(output_size)
        self.residual = (nn.Sequential() if input_size == output_size
            else nn.Linear(input_size, output_size))
def forward(self, a, c=None):
if c is not None:
n2 = F.elu(self.w2(a) + self.w3(c))
else:
n2 = F.elu(self.w2(a))
n1 = self.w1(n2)
grn = self.layer_norm(self.residual(a) + self.glu(n1))
return grn
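# GatedResidualNetwork above computes LayerNorm(residual(a) + GLU(w1(n2)))
# with n2 = ELU(w2(a) + w3(c)) when a context c is given, else ELU(w2(a));
# the residual branch is the identity when input_size == output_size.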
class PositionWiseFeedForward(nn.Module):
def __init__(self, input_size, output_size, dropout):
super().__init__()
        self.grn = GatedResidualNetwork(input_size=input_size,
            hidden_size=input_size, output_size=output_size, dropout=dropout)
        self.gate_add_norm = GateAddNorm(input_size=input_size,
            output_size=output_size, dropout=dropout)
def forward(self, x, skip):
out = self.grn(x)
out = self.gate_add_norm(out, skip)
return out
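# PositionWiseFeedForward above chains the two blocks:
# out = GateAddNorm(GRN(x), skip).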
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'dropout': 0.5}]
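# A minimal eager-mode sanity check (a sketch; sizes mirror get_inputs and
# get_init_inputs above):
if __name__ == '__main__':
    ffn = PositionWiseFeedForward(input_size=4, output_size=4, dropout=0.5)
    x, skip = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    out = ffn(x, skip)
    print(out.shape)  # torch.Size([4, 4, 4, 4])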
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
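# The kernel above is ELU with alpha=1: x where x > 0, expm1(x) otherwise.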
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp8 = tl.sigmoid(tmp7)
tmp10 = tmp8 * tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp15 = tl.sigmoid(tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp22 = tl.sigmoid(tmp21)
tmp24 = tmp22 * tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_3(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tmp7 = tl.sigmoid(tmp6)
tmp9 = tmp7 * tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp14 = tl.sigmoid(tmp13)
tmp16 = tmp14 * tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp21 = tl.sigmoid(tmp20)
tmp23 = tmp21 * tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_sigmoid_4(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 - tmp6
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp12 = tmp7 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, buf2, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_9
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_1[grid(64)](
primals_3, buf3, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_2[grid(256)](
primals_3, buf3, buf4, buf5, buf6, primals_10, primals_11, buf7,
256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf7, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_13
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf7, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_15
buf10 = buf6
del buf6
buf11 = buf5
del buf5
triton_poi_fused_add_mul_native_layer_norm_sigmoid_3[grid(64)](buf8,
buf9, primals_16, buf10, buf11, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_sigmoid_4[grid(256)](buf8,
buf9, primals_16, buf10, buf11, primals_17, primals_18, buf12,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf10
del buf11
del primals_18
return (buf12, primals_3, primals_10, primals_16, primals_17, buf0,
reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, buf3, buf4,
reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf8, buf9,
primals_14, primals_12, primals_8, primals_6, primals_4)
class GatedLinearUnit(nn.Module):
def __init__(self, input_size, output_size, dropout=0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.w4 = nn.Linear(input_size, output_size)
self.w5 = nn.Linear(input_size, output_size)
self.act = nn.Sigmoid()
def forward(self, x):
x = self.dropout(x)
x = self.act(self.w4(x)) * self.w5(x)
return x
class GateAddNorm(nn.Module):
def __init__(self, input_size, output_size, dropout):
super().__init__()
self.glu = GatedLinearUnit(input_size, output_size, dropout)
self.norm = nn.LayerNorm(output_size)
def forward(self, x, skip):
return self.norm(self.glu(x) + skip)
class GatedResidualNetwork(nn.Module):
    def __init__(self, input_size, hidden_size, output_size,
            context_size=None, dropout=0):
super().__init__()
self.w1 = nn.Linear(hidden_size, hidden_size)
self.w2 = nn.Linear(input_size, hidden_size)
self.w3 = None if context_size is None else nn.Linear(context_size,
hidden_size, bias=False)
self.glu = GatedLinearUnit(hidden_size, output_size, dropout)
self.layer_norm = nn.LayerNorm(output_size)
        self.residual = (nn.Sequential() if input_size == output_size
            else nn.Linear(input_size, output_size))
def forward(self, a, c=None):
if c is not None:
n2 = F.elu(self.w2(a) + self.w3(c))
else:
n2 = F.elu(self.w2(a))
n1 = self.w1(n2)
grn = self.layer_norm(self.residual(a) + self.glu(n1))
return grn
class PositionWiseFeedForwardNew(nn.Module):
def __init__(self, input_size, output_size, dropout):
super().__init__()
        self.grn = GatedResidualNetwork(input_size=input_size,
            hidden_size=input_size, output_size=output_size, dropout=dropout)
        self.gate_add_norm = GateAddNorm(input_size=input_size,
            output_size=output_size, dropout=dropout)
def forward(self, input_0, input_1):
primals_1 = self.grn.w1.weight
primals_2 = self.grn.w1.bias
primals_4 = self.grn.w2.weight
primals_5 = self.grn.w2.bias
primals_6 = self.grn.glu.w4.weight
primals_7 = self.grn.glu.w4.bias
primals_8 = self.grn.glu.w5.weight
primals_9 = self.grn.glu.w5.bias
primals_10 = self.grn.layer_norm.weight
primals_11 = self.grn.layer_norm.bias
primals_12 = self.gate_add_norm.glu.w4.weight
primals_13 = self.gate_add_norm.glu.w4.bias
primals_14 = self.gate_add_norm.glu.w5.weight
primals_15 = self.gate_add_norm.glu.w5.bias
primals_17 = self.gate_add_norm.norm.weight
primals_18 = self.gate_add_norm.norm.bias
primals_3 = input_0
primals_16 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0]
|
krodyush/training_extensions
|
PositionWiseFeedForward
| false | 11,024 |
[
"Apache-2.0"
] | 0 |
542f4004dfbc6fc62a622065367ba4f85a703dd3
|
https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3
|
NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/zi/czioyfiql36jvbru3amu3iggyuvnn5c4pypwuaiss36muc2jqtqb.py
# Topologically Sorted Source Nodes: [model_input], Original ATen: [aten.add]
# Source node to ATen node mapping:
# model_input => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/sb/csbqfhl3tbhobxxibww6rnv4q33jyajqsvetse4kiun22xct43oo.py
# Topologically Sorted Source Nodes: [out1_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out1_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
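# The kernel above fuses the fc1 bias add with ReLU, updating in_out_ptr0
# in place: out = max(0, out + bias).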
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [model_input], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [out1_1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
return (buf2, reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf2, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn
import torch.onnx
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency,
self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1, input2):
model_input = input1 + input2
out1 = self.fc1(model_input)
out1 = self.relu(out1)
out2 = self.fc2(out1)
return out1, out2
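# Note the dependency between the two outputs:
# out2 = fc2(relu(fc1(input1 + input2))) = fc2(out1).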
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
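# A minimal eager-mode sanity check (a sketch; arguments mirror
# get_init_inputs above):
if __name__ == '__main__':
    net = NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency(
        input_size=4, hidden_size=4, num_classes=4)
    out1, out2 = net(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
    print(out1.shape, out2.shape)  # both torch.Size([4, 4, 4, 4])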
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_relu_1[grid(256)](buf2, primals_4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_6
return buf2, reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf2, primals_5
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependencyNew(torch.nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependencyNew,
            self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
thilow/onnxruntime
|
NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency
| false | 11,025 |
[
"MIT"
] | 0 |
1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
https://github.com/thilow/onnxruntime/tree/1a3ddf0714e1bdf9b807a342eee5f6e160ad1ec9
|
BertOutput
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ss/cssn3ayzwsxbizosd6ieezxafjef3fxscx57lbnlxbdiuph3p2je.py
# Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add
# u => mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
triton_poi_fused_add_mean_0 = async_compile.triton('triton_poi_fused_add_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
''', device_str='cuda')
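# The kernel above computes u, the per-row mean over the last dimension
# (4 elements) of dense(hidden_states) + bias + input_tensor, the first step
# of BertLayerNorm.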
# kernel path: runs/run_shard_8/inductor_cache/l6/cl6vibrzoyykzmbhmvlsdaksh3k2diif7eg66z2ho46tjsy6emma.py
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
# Source node to ATen node mapping:
# add => add
# sub => sub
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
# %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
triton_poi_fused_add_sub_1 = async_compile.triton('triton_poi_fused_add_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
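# The kernel above subtracts that mean in place:
# buf2 = (dense(hidden_states) + bias + input_tensor) - u.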
# kernel path: runs/run_shard_8/inductor_cache/2j/c2j3iqfivxp4slgdp5wwpyv7egzeitk5o2kiyeptosrthyamlxof.py
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# hidden_states_2 => add_2
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sqrt => sqrt
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
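# The kernel above finishes BertLayerNorm: s = mean((x - u)^2) over the last
# dimension, then y = gamma * (x - u) / sqrt(s + 1e-12) + beta.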
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_0.run(buf0, primals_2, primals_4, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
triton_poi_fused_add_sub_1.run(buf2, primals_2, primals_4, buf1, 256, grid=grid(256), stream=stream0)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_pow_sqrt_2.run(primals_5, buf2, primals_6, buf3, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""LayerNormalization層です。
学習済みモデルをそのままロードするため、学習済みモデルの変数名に変えています。
オリジナルのGitHubの実装から変数名を変えています。
weight→gamma、bias→beta
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
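# LayerNorm with biased variance: y = gamma * (x - u) / sqrt(s + eps) + beta.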
class BertOutput(nn.Module):
"""BERTのTransformerBlockモジュールのFeedForwardです"""
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
"""
hidden_states: BertIntermediateの出力テンソル
input_tensor:BertAttentionの出力テンソル
"""
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4,
hidden_dropout_prob=0.5)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
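# The three kernels below implement BertOutput's dense-bias add, the residual
# add, and BertLayerNorm, fused by Inductor into three passes. Kernel 0
# reduces each length-4 row to its LayerNorm mean:
# u = mean(dense(h) + bias + input_tensor).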
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
    tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + 2)
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
    tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + 3)
    tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
    tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
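# Kernel 1 recomputes dense(h) + bias + input_tensor elementwise and subtracts
# the per-row mean u from kernel 0, storing (x - u) in place.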
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
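# Kernel 2 finishes the LayerNorm: s = mean((x - u)^2) per row, then
# gamma * (x - u) / sqrt(s + 1e-12) + beta.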
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5,
buf2, primals_6, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
    return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""LayerNormalization層です。
学習済みモデルをそのままロードするため、学習済みモデルの変数名に変えています。
オリジナルのGitHubの実装から変数名を変えています。
weight→gamma、bias→beta
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertOutputNew(nn.Module):
"""BERTのTransformerBlockモジュールのFeedForwardです"""
def __init__(self, config):
super(BertOutputNew, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_5 = self.LayerNorm.gamma
primals_6 = self.LayerNorm.beta
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
Cyndi-Tokyotech/Fin_Text_Analysis_ML
|
BertOutput
| false | 11,026 |
[
"MIT"
] | 0 |
7f9b6c1ea78f8e6f32c003b2de32809722df88d4
|
https://github.com/Cyndi-Tokyotech/Fin_Text_Analysis_ML/tree/7f9b6c1ea78f8e6f32c003b2de32809722df88d4
|
MultiHeadAttentionLayer
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/73/c733obv6iqsxnez5iluzcy7lys2vm32ydk5b6zhwfwhojsazhp5v.py
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/zv/czv3tzezwxkylzsgkrivaldxprnr7tvjr5iihe4mbc7bzdev5lsj.py
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/re/cre6hfbee3vydxqwkirdeh4ubis6odh7hojbjikdb74cqttxgtxm.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_div_2 = async_compile.triton('triton_poi_fused_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ap/capcjcuji5yjxnxqzhjjgiyi624adjk4gfpukbdhlyhs3b6jf32m.py
# Topologically Sorted Source Nodes: [att], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# att => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/tk/ctkefobytpo25r3eoepr2gixygwjbxnc2fboxyew6d6xg4xfac5m.py
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# att_1 => amax, div_1, exp, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_9, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_9, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_4 = async_compile.triton('triton_per_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, grid=grid(16), stream=stream0)
del buf0
del buf1
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
del primals_6
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5)
del primals_9
buf6 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
triton_poi_fused_div_2.run(buf6, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [att], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf4, primals_7, buf7, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_7
buf8 = reinterpret_tensor(buf4, (16, 1, 16), (16, 16, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [att], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 1, 1), (1, 0, 0), 0), reinterpret_tensor(buf7, (16, 1, 16), (16, 0, 1), 0), out=buf8)
buf11 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
triton_per_fused__softmax_4.run(buf8, buf11, 16, 16, grid=grid(16), stream=stream0)
buf12 = reinterpret_tensor(buf8, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf5, primals_10, buf12, 16, 16, grid=grid(16, 16), stream=stream0)
del buf5
del primals_10
buf13 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 1, 16), (16, 16, 1), 0), reinterpret_tensor(buf12, (16, 16, 1), (16, 1, 0), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf13, (4, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14)
del primals_13
return (buf14, buf11, primals_1, buf2, reinterpret_tensor(primals_8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (64, 4), (4, 1), 0), buf11, reinterpret_tensor(buf13, (4, 4), (4, 1), 0), primals_12, reinterpret_tensor(buf12, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf6, (16, 1, 1), (1, 1, 4), 0), reinterpret_tensor(buf7, (16, 16, 1), (16, 1, 16), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
import torch.nn as nn
class MultiHeadAttentionLayer(nn.Module):
def __init__(self, hidden_dim, n_heads, dropout=0.1):
super().__init__()
assert hidden_dim % n_heads == 0
self.hidden_dim = hidden_dim
self.n_heads = n_heads
self.head_dim = hidden_dim // n_heads
self.fc_q = nn.Linear(hidden_dim, hidden_dim)
self.fc_k = nn.Linear(hidden_dim, hidden_dim)
self.fc_v = nn.Linear(hidden_dim, hidden_dim)
self.fc_o = nn.Linear(hidden_dim, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(hidden_dim, eps=1e-06)
self.scale = math.sqrt(self.head_dim)
def forward(self, q, k, v, mask=None):
batch_size = q.size(0)
q = self.layer_norm(q)
q = self.fc_q(q)
k = self.fc_k(k)
v = self.fc_v(v)
q = q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
k = k.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
v = v.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
att = torch.matmul(q / self.scale, k.permute(0, 1, 3, 2))
if mask is not None:
att = att.masked_fill(mask == 0, -10000000000.0)
att = torch.softmax(att, dim=-1)
out = torch.matmul(self.dropout(att), v)
out = out.permute(0, 2, 1, 3).contiguous()
out = out.view(batch_size, self.hidden_dim)
out = self.dropout(self.fc_o(out))
return out, att
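# For reference, the forward above is standard scaled dot-product attention:
#   att = softmax(q @ k^T / sqrt(head_dim)), with optional masking
#   out = fc_o(dropout(att) @ v), heads merged back into hidden_dim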
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4,
4, 4])]
def get_init_inputs():
return [[], {'hidden_dim': 4, 'n_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
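# Kernel 0 computes per-row LayerNorm statistics over the last dim (size 4):
# the mean and rsqrt(var + 1e-06), consumed by kernel 1.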
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
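# Kernel 1 applies the affine LayerNorm transform:
# (x - mean) * rsqrt(var + eps) * weight + bias.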
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
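# Kernel 2 adds the fc_q bias and applies the 1/scale factor. With
# hidden_dim=4 and n_heads=4, head_dim=1, so scale = sqrt(1) = 1.0, hence the
# multiply by 1.0 below.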
@triton.jit
def triton_poi_fused_div_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
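# Kernel 3 fuses the projection bias add with a permute/clone into a
# contiguous layout for the batched matmuls that follow.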
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
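# Kernel 4 is a numerically stable row softmax:
# exp(x - max(x)) / sum(exp(x - max(x))) along the key dimension.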
@triton.jit
def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_8, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
del primals_6
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_11, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5)
del primals_9
buf6 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 16, 16), 0)
del buf3
triton_poi_fused_div_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
        buf7 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 16)](buf4, primals_7, buf7, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_7
buf8 = reinterpret_tensor(buf4, (16, 1, 16), (16, 16, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 1, 1), (1, 0, 0),
0), reinterpret_tensor(buf7, (16, 1, 16), (16, 0, 1), 0), out=buf8)
        buf11 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.float32)
triton_per_fused__softmax_4[grid(16)](buf8, buf11, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf12 = reinterpret_tensor(buf8, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf8
triton_poi_fused_clone_3[grid(16, 16)](buf5, primals_10, buf12, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf5
del primals_10
buf13 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 1, 16), (16, 16,
1), 0), reinterpret_tensor(buf12, (16, 16, 1), (16, 1, 0), 0),
out=buf13)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf13, (4, 4),
(4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_13
    return (buf14, buf11, primals_1, buf2,
        reinterpret_tensor(primals_8, (64, 4), (4, 1), 0),
        reinterpret_tensor(primals_11, (64, 4), (4, 1), 0), buf11,
        reinterpret_tensor(buf13, (4, 4), (4, 1), 0), primals_12,
        reinterpret_tensor(buf12, (16, 1, 16), (16, 1, 1), 0),
        reinterpret_tensor(buf6, (16, 1, 1), (1, 1, 4), 0),
        reinterpret_tensor(buf7, (16, 16, 1), (16, 1, 16), 0), primals_4)
class MultiHeadAttentionLayerNew(nn.Module):
def __init__(self, hidden_dim, n_heads, dropout=0.1):
super().__init__()
assert hidden_dim % n_heads == 0
self.hidden_dim = hidden_dim
self.n_heads = n_heads
self.head_dim = hidden_dim // n_heads
self.fc_q = nn.Linear(hidden_dim, hidden_dim)
self.fc_k = nn.Linear(hidden_dim, hidden_dim)
self.fc_v = nn.Linear(hidden_dim, hidden_dim)
self.fc_o = nn.Linear(hidden_dim, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(hidden_dim, eps=1e-06)
self.scale = math.sqrt(self.head_dim)
def forward(self, input_0, input_1, input_2):
        primals_2 = self.layer_norm.weight
        primals_3 = self.layer_norm.bias
        primals_4 = self.fc_q.weight
        primals_5 = self.fc_q.bias
        primals_6 = self.fc_k.weight
        primals_7 = self.fc_k.bias
        primals_9 = self.fc_v.weight
        primals_10 = self.fc_v.bias
        primals_12 = self.fc_o.weight
        primals_13 = self.fc_o.bias
        primals_1 = input_0
        primals_8 = input_1
        primals_11 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
|
wenjunyoung/PAN_PLUS
|
MultiHeadAttentionLayer
| false | 11,027 |
[
"Apache-2.0"
] | 0 |
c893ff4775c8ff137a21c15d34fb93b9394dbfe5
|
https://github.com/wenjunyoung/PAN_PLUS/tree/c893ff4775c8ff137a21c15d34fb93b9394dbfe5
|
UpsampleConvLayer
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ms/cmsngihz7hc5beia3yef6zrp6xf5ezd674joaxrbgyj4zkg2icam.py
# Topologically Sorted Source Nodes: [x_in, out], Original ATen: [aten._unsafe_index, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => _unsafe_index_1, _unsafe_index_2
# x_in => _unsafe_index
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_1, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused__unsafe_index_reflection_pad2d_0 = async_compile.triton('triton_poi_fused__unsafe_index_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 20) % 20
x0 = xindex % 20
x2 = (xindex // 400)
x5 = xindex
tmp0 = 15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-2) + x1)))))
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.25
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = 15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-2) + x0)))))
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/d7/cd7mnmoedo2dtwtv7hyttuplhxkwjlorhbueq7asbsz33jdjqfow.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_2, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4624
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 289) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 20, 20), (1600, 400, 20, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_in, out], Original ATen: [aten._unsafe_index, aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused__unsafe_index_reflection_pad2d_0.run(primals_1, buf0, 6400, grid=grid(6400), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 17, 17), (1156, 289, 17, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 4624, grid=grid(4624), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
class UpsampleConvLayer(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
    def __init__(self, in_channels, out_channels, kernel_size, stride, upsample):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
self.upsample_layer = torch.nn.Upsample(scale_factor=upsample)
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
x_in = x
x_in = self.upsample_layer(x_in)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
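# Size arithmetic for the sample inputs below (illustrative):
#   4x4 -> Upsample(scale_factor=4) -> 16x16 -> ReflectionPad2d(2) -> 20x20
#   -> Conv2d(kernel_size=4, stride=1) -> 17x17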
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1, 'upsample': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
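# Kernel 0 fuses nearest-neighbor 4x upsampling with reflection padding:
# 15 - |-15 + |x - 2|| reflects padded coordinates back into [0, 15], and the
# 0.25 factor maps each upsampled index down to the 4x4 source grid.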
@triton.jit
def triton_poi_fused__unsafe_index_reflection_pad2d_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 20 % 20
x0 = xindex % 20
x2 = xindex // 400
x5 = xindex
tmp0 = 15 + -1 * tl_math.abs(-15 + tl_math.abs(-2 + x1))
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.25
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = 15 + -1 * tl_math.abs(-15 + tl_math.abs(-2 + x0))
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x5, tmp9, xmask)
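# Kernel 1 adds the convolution bias in place; the convolution itself runs via
# extern_kernels.convolution in call() below.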
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 4624
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 289 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 20, 20), (1600, 400, 20, 1),
            torch.float32)
get_raw_stream(0)
        triton_poi_fused__unsafe_index_reflection_pad2d_0[grid(6400)](
            primals_1, buf0, 6400, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 17, 17), (1156, 289, 17, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(4624)](buf2, primals_3, 4624,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class UpsampleConvLayerNew(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
    def __init__(self, in_channels, out_channels, kernel_size, stride, upsample):
super(UpsampleConvLayerNew, self).__init__()
self.upsample = upsample
self.upsample_layer = torch.nn.Upsample(scale_factor=upsample)
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, input_0):
        primals_2 = self.conv2d.weight
        primals_3 = self.conv2d.bias
        primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
yuweiliandrew/openrtist
|
UpsampleConvLayer
| false | 11,028 |
[
"Apache-2.0"
] | 0 |
4b6b17e77587751593d5e529b154e60513de3236
|
https://github.com/yuweiliandrew/openrtist/tree/4b6b17e77587751593d5e529b154e60513de3236
|
Attention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/4n/c4nqpqrkqtv2c5ffrkecrmkabe25txkbprcacrmvb4yojiniyowg.py
# Topologically Sorted Source Nodes: [repeat, expanded_pu, pow_1, add, sum_1, query_norm, pow_2, add_1, sum_2, doc_norm, mul, prod, norm_prod, cos_sim_raw], Original ATen: [aten.repeat, aten.view, aten.pow, aten.add, aten.sum, aten.sqrt, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# cos_sim_raw => div
# doc_norm => sqrt_1
# expanded_pu => view
# mul => mul
# norm_prod => mul_1
# pow_1 => pow_1
# pow_2 => pow_2
# prod => sum_3
# query_norm => sqrt
# repeat => repeat
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_1, [1, 4]), kwargs = {})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%repeat, [4, -1]), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, 1e-05), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [1]), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-05), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [1]), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %primals_2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, %sqrt_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %mul_1), kwargs = {})
triton_per_fused_add_div_mul_pow_repeat_sqrt_sum_view_0 = async_compile.triton('triton_per_fused_add_div_mul_pow_repeat_sqrt_sum_view_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_pow_repeat_sqrt_sum_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_pow_repeat_sqrt_sum_view_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (r1 % 4)), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = 1e-05
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp8 * tmp8
tmp10 = tmp9 + tmp2
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp0 * tmp8
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = libdevice.sqrt(tmp7)
tmp21 = libdevice.sqrt(tmp14)
tmp22 = tmp20 * tmp21
tmp23 = tmp19 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/vm/cvmz5zb5hklcqpb7jp3aicaos5mk3fnzhheoduici43zwr4y2zyd.py
# Topologically Sorted Source Nodes: [attention_values], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_values => amax, div_1, exp, sub, sum_4
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_4), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [repeat, expanded_pu, pow_1, add, sum_1, query_norm, pow_2, add_1, sum_2, doc_norm, mul, prod, norm_prod, cos_sim_raw], Original ATen: [aten.repeat, aten.view, aten.pow, aten.add, aten.sum, aten.sqrt, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_pow_repeat_sqrt_sum_view_0.run(buf3, primals_1, primals_2, 4, 16, grid=grid(4), stream=stream0)
del primals_1
del primals_2
buf4 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [fc_layers], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf3, (1, 4), (0, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_3
del primals_4
buf7 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [attention_values], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf4, buf7, 1, 4, grid=grid(1), stream=stream0)
del buf4
return (buf7, reinterpret_tensor(buf3, (1, 4), (4, 1), 0), buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
def activation_func(name):
name = name.lower()
if name == 'sigmoid':
return torch.nn.Sigmoid()
elif name == 'tanh':
return torch.nn.Tanh()
elif name == 'relu':
return torch.nn.ReLU()
elif name == 'softmax':
return torch.nn.Softmax()
elif name == 'leaky_relu':
return torch.nn.LeakyReLU(0.1)
else:
return torch.nn.Sequential()
def cosine_similarity(input1, input2):
query_norm = torch.sqrt(torch.sum(input1 ** 2 + 1e-05, 1))
doc_norm = torch.sqrt(torch.sum(input2 ** 2 + 1e-05, 1))
prod = torch.sum(torch.mul(input1, input2), 1)
norm_prod = torch.mul(query_norm, doc_norm)
cos_sim_raw = torch.div(prod, norm_prod)
return cos_sim_raw
class Attention(torch.nn.Module):
def __init__(self, n_k, activation='relu'):
super(Attention, self).__init__()
self.n_k = n_k
        # NOTE: nn.Linear's third positional argument is `bias`; the activation
        # module passed here is merely truthy, so this is equivalent to
        # bias=True and the chosen activation is silently discarded.
        self.fc_layer = torch.nn.Linear(self.n_k, self.n_k,
            activation_func(activation))
        self.soft_max_layer = torch.nn.Softmax(dim=0)  # explicit dim: the scores here are 1-D
def forward(self, pu, mp):
expanded_pu = pu.repeat(1, len(mp)).view(len(mp), -1)
inputs = cosine_similarity(expanded_pu, mp)
fc_layers = self.fc_layer(inputs)
attention_values = self.soft_max_layer(fc_layers)
return attention_values
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 16])]
def get_init_inputs():
return [[], {'n_k': 4}]
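# Usage sketch (illustrative addition, not part of the upstream repo): drives
# the eager Attention module with the shapes produced by get_inputs().
def _demo_attention():
    att = Attention(**get_init_inputs()[1])
    pu, mp = get_inputs()
    weights = att(pu, mp)  # one softmax-normalized score per row of mp
    assert weights.shape == (4,)
    assert torch.allclose(weights.sum(), torch.tensor(1.0))
    return weights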
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mul_pow_repeat_sqrt_sum_view_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
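    # Fused row reduction computing a cosine similarity per row: for each of
    # the 4 rows, sum(a*b) / (sqrt(sum(a*a + 1e-5)) * sqrt(sum(b*b + 1e-5)))
    # over the 16 lanes, where a is the repeated first input (indexed r1 % 4)
    # and b the matching row of the second input.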
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + r1 % 4), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = 1e-05
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp8 * tmp8
tmp10 = tmp9 + tmp2
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp0 * tmp8
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = libdevice.sqrt(tmp7)
tmp21 = libdevice.sqrt(tmp14)
tmp22 = tmp20 * tmp21
tmp23 = tmp19 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp23, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
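    # Numerically stable softmax over a single row of 4 scores: subtract the
    # row max, exponentiate, and normalize by the sum of exponentials.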
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_pow_repeat_sqrt_sum_view_0[grid(4)](buf3,
primals_1, primals_2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_1
del primals_2
buf4 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_4,
            reinterpret_tensor(buf3, (1, 4), (0, 1), 0),
            reinterpret_tensor(primals_3, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
del primals_3
del primals_4
buf7 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused__softmax_1[grid(1)](buf4, buf7, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
del buf4
return buf7, reinterpret_tensor(buf3, (1, 4), (4, 1), 0), buf7
def activation_func(name):
name = name.lower()
if name == 'sigmoid':
return torch.nn.Sigmoid()
elif name == 'tanh':
return torch.nn.Tanh()
elif name == 'relu':
return torch.nn.ReLU()
elif name == 'softmax':
return torch.nn.Softmax()
elif name == 'leaky_relu':
return torch.nn.LeakyReLU(0.1)
else:
return torch.nn.Sequential()
def cosine_similarity(input1, input2):
query_norm = torch.sqrt(torch.sum(input1 ** 2 + 1e-05, 1))
doc_norm = torch.sqrt(torch.sum(input2 ** 2 + 1e-05, 1))
prod = torch.sum(torch.mul(input1, input2), 1)
norm_prod = torch.mul(query_norm, doc_norm)
cos_sim_raw = torch.div(prod, norm_prod)
return cos_sim_raw
class AttentionNew(torch.nn.Module):
def __init__(self, n_k, activation='relu'):
super(AttentionNew, self).__init__()
self.n_k = n_k
        # NOTE: nn.Linear's third positional argument is `bias`; the activation
        # module passed here is merely truthy, so this is equivalent to
        # bias=True and the chosen activation is silently discarded.
        self.fc_layer = torch.nn.Linear(self.n_k, self.n_k,
            activation_func(activation))
        self.soft_max_layer = torch.nn.Softmax(dim=0)  # explicit dim: the scores here are 1-D
def forward(self, input_0, input_1):
primals_1 = self.fc_layer.weight
primals_4 = self.fc_layer.bias
primals_3 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
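# Smoke-test sketch (illustrative; requires a CUDA device, since the kernels
# above are compiled for 'cuda'): mirrors the eager module's input shapes and
# checks only shape and normalization of the fused forward pass.
def _demo_attention_new():
    m = AttentionNew(n_k=4).cuda()
    pu = torch.rand(4, 4, device='cuda')
    mp = torch.rand(4, 16, device='cuda')
    out = m(pu, mp)  # softmax-normalized attention values, shape (4,)
    assert out.shape == (4,)
    assert torch.allclose(out.sum(), torch.ones((), device='cuda'))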
|
xu94-nlp/Code-for-MAMO
|
Attention
| false | 11,029 |
[
"Apache-2.0"
] | 0 |
d9c6655e0660976c90c07fa096a1f5dc8328a60b
|
https://github.com/xu94-nlp/Code-for-MAMO/tree/d9c6655e0660976c90c07fa096a1f5dc8328a60b
|
AngleSimpleLinear
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xe/cxewggzrfqe57dzglxrzfhfgpsywlh36utvtdulp5oi75wfs7ml3.py
# Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize_1 => div_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/km/ckmig5ptfj2o4ehbs23kkteksy6opupdrfewu7uhpa5zkvhn3l6r.py
# Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and]
# Source node to ATen node mapping:
# clamp => clamp_max, clamp_min_2
# Graph fragment:
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mm, -0.9999999), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 0.9999999), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%mm, -0.9999999), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%mm, 0.9999999), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {})
triton_poi_fused_clamp_ge_le_logical_and_2 = async_compile.triton('triton_poi_fused_clamp_ge_le_logical_and_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_le_logical_and_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -0.9999999
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 0.9999999
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr1 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
triton_poi_fused_div_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize_1, cos_theta], Original ATen: [aten.div, aten.mm]
extern_kernels.mm(buf0, buf1, out=buf2)
buf3 = buf1; del buf1 # reuse
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and]
triton_poi_fused_clamp_ge_le_logical_and_2.run(buf2, buf3, buf4, 16, grid=grid(16), stream=stream0)
del buf2
return (buf3, primals_2, buf4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
from torch.nn import Parameter
from torch.nn.parameter import Parameter
import torch.onnx
import torch.nn
class AngleSimpleLinear(nn.Module):
"""Computes cos of angles between input vectors and weights vectors"""
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, x):
cos_theta = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
        return cos_theta.clamp(-1.0 + 1e-07, 1.0 - 1e-07),  # trailing comma: returns a 1-tuple
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
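# Usage sketch (illustrative addition): the clamp keeps every cosine strictly
# inside (-1, 1); note the trailing comma in forward, so a 1-tuple is returned.
def _demo_angle_linear():
    layer = AngleSimpleLinear(**get_init_inputs()[1])
    cos_theta, = layer(*get_inputs())  # unpack the 1-tuple
    assert cos_theta.shape == (4, 4)
    assert cos_theta.abs().max() < 1.0
    return cos_theta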
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
from torch.nn import Parameter
from torch.nn.parameter import Parameter
import torch.onnx
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -0.9999999
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 0.9999999
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, buf1, out=buf2)
buf3 = buf1
del buf1
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_clamp_ge_le_logical_and_2[grid(16)](buf2, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
return buf3, primals_2, buf4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0)
class AngleSimpleLinearNew(nn.Module):
"""Computes cos of angles between input vectors and weights vectors"""
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
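# Smoke-test sketch (illustrative; requires CUDA): unlike the eager module,
# the compiled forward returns the clamped cosine tensor itself (output[0]),
# not a 1-tuple.
def _demo_angle_linear_new():
    layer = AngleSimpleLinearNew(in_features=4, out_features=4).cuda()
    cos_theta = layer(torch.rand(4, 4, device='cuda'))
    assert cos_theta.shape == (4, 4)
    assert cos_theta.abs().max() < 1.0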
|
krodyush/training_extensions
|
AngleSimpleLinear
| false | 11,030 |
[
"Apache-2.0"
] | 0 |
542f4004dfbc6fc62a622065367ba4f85a703dd3
|
https://github.com/krodyush/training_extensions/tree/542f4004dfbc6fc62a622065367ba4f85a703dd3
|
FCLateActionSAQFunction
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/lg/clgg7lidrsfgaud7ua7lulq4csdjjfhoa5iu6ldcldqvnqeoprch.py
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# h_1 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %primals_4], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/p5/cp5tfqfwrfwxlkkeq5u27dwcgidea534dshg3prbpaz2f7xapt3e.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 8), (8, 1))
assert_size_stride(primals_6, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, primals_3, primals_4, buf1, 32, grid=grid(32), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf0, primals_3, buf4, 16, grid=grid(16), stream=stream0)
del buf0
del primals_3
return (buf3, primals_1, buf1, primals_5, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import numpy as np
from torch import nn
from abc import ABCMeta
from abc import abstractmethod
import torch.nn.functional as F
def init_lecun_normal(tensor, scale=1.0):
"""Initializes the tensor with LeCunNormal."""
fan_in = torch.nn.init._calculate_correct_fan(tensor, 'fan_in')
std = scale * np.sqrt(1.0 / fan_in)
with torch.no_grad():
return tensor.normal_(0, std)
@torch.no_grad()
def init_chainer_default(layer):
"""Initializes the layer with the chainer default.
weights with LeCunNormal(scale=1.0) and zeros as biases
"""
assert isinstance(layer, nn.Module)
if isinstance(layer, (nn.Linear, nn.Conv2d)):
init_lecun_normal(layer.weight)
if layer.bias is not None:
nn.init.zeros_(layer.bias)
return layer
class MLP(nn.Module):
"""Multi-Layer Perceptron"""
def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu,
last_wscale=1):
self.in_size = in_size
self.out_size = out_size
self.hidden_sizes = hidden_sizes
self.nonlinearity = nonlinearity
super().__init__()
if hidden_sizes:
self.hidden_layers = nn.ModuleList()
self.hidden_layers.append(nn.Linear(in_size, hidden_sizes[0]))
for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
self.hidden_layers.append(nn.Linear(hin, hout))
self.hidden_layers.apply(init_chainer_default)
self.output = nn.Linear(hidden_sizes[-1], out_size)
else:
self.output = nn.Linear(in_size, out_size)
init_lecun_normal(self.output.weight, scale=last_wscale)
nn.init.zeros_(self.output.bias)
def forward(self, x):
h = x
if self.hidden_sizes:
for layer in self.hidden_layers:
h = self.nonlinearity(layer(h))
return self.output(h)
class StateActionQFunction(object, metaclass=ABCMeta):
"""Abstract Q-function with state and action input."""
@abstractmethod
def __call__(self, x, a):
"""Evaluates Q-function
Args:
x (ndarray): state input
a (ndarray): action input
Returns:
Q-value for state x and action a
"""
raise NotImplementedError()
class FCLateActionSAQFunction(nn.Module, StateActionQFunction):
"""Fully-connected (s,a)-input Q-function with late action input.
Actions are not included until the second hidden layer and not normalized.
This architecture is used in the DDPG paper:
http://arxiv.org/abs/1509.02971
Args:
n_dim_obs (int): Number of dimensions of observation space.
n_dim_action (int): Number of dimensions of action space.
n_hidden_channels (int): Number of hidden channels.
n_hidden_layers (int): Number of hidden layers. It must be greater than
or equal to 1.
nonlinearity (callable): Nonlinearity between layers. It must accept a
Variable as an argument and return a Variable with the same shape.
Nonlinearities with learnable parameters such as PReLU are not
supported.
last_wscale (float): Scale of weight initialization of the last layer.
"""
def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels,
n_hidden_layers, nonlinearity=F.relu, last_wscale=1.0):
assert n_hidden_layers >= 1
self.n_input_channels = n_dim_obs + n_dim_action
self.n_hidden_layers = n_hidden_layers
self.n_hidden_channels = n_hidden_channels
self.nonlinearity = nonlinearity
super().__init__()
self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels,
hidden_sizes=[])
self.mlp = MLP(in_size=n_hidden_channels + n_dim_action, out_size=1,
hidden_sizes=[self.n_hidden_channels] * (self.n_hidden_layers -
1), nonlinearity=nonlinearity, last_wscale=last_wscale)
self.output = self.mlp.output
def forward(self, state, action):
h = self.nonlinearity(self.obs_mlp(state))
h = torch.cat((h, action), dim=1)
return self.mlp(h)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_dim_obs': 4, 'n_dim_action': 4, 'n_hidden_channels': 4,
'n_hidden_layers': 1}]
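# Usage sketch (illustrative addition): a batch of 4 observations and 4-dim
# actions yields one Q-value per (state, action) pair.
def _demo_q_function():
    qf = FCLateActionSAQFunction(**get_init_inputs()[1])
    obs, action = get_inputs()
    q = qf(obs, action)
    assert q.shape == (4, 1)
    return q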
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
from torch import nn
from abc import ABCMeta
from abc import abstractmethod
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
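    # Fused bias-add + ReLU + concat: output lanes 0-3 apply relu(mm_result +
    # bias) from the observation Linear, lanes 4-7 copy the raw action
    # features, forming the (4, 8) concatenated hidden tensor h_1.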
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp15 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 8), (8, 1))
assert_size_stride(primals_6, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](buf0, primals_3, primals_4, buf1,
32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5,
(8, 1), (1, 8), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16)](buf0,
primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf0
del primals_3
return buf3, primals_1, buf1, primals_5, buf4
def init_lecun_normal(tensor, scale=1.0):
"""Initializes the tensor with LeCunNormal."""
fan_in = torch.nn.init._calculate_correct_fan(tensor, 'fan_in')
std = scale * np.sqrt(1.0 / fan_in)
with torch.no_grad():
return tensor.normal_(0, std)
@torch.no_grad()
def init_chainer_default(layer):
"""Initializes the layer with the chainer default.
weights with LeCunNormal(scale=1.0) and zeros as biases
"""
assert isinstance(layer, nn.Module)
if isinstance(layer, (nn.Linear, nn.Conv2d)):
init_lecun_normal(layer.weight)
if layer.bias is not None:
nn.init.zeros_(layer.bias)
return layer
class MLP(nn.Module):
"""Multi-Layer Perceptron"""
def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu,
last_wscale=1):
self.in_size = in_size
self.out_size = out_size
self.hidden_sizes = hidden_sizes
self.nonlinearity = nonlinearity
super().__init__()
if hidden_sizes:
self.hidden_layers = nn.ModuleList()
self.hidden_layers.append(nn.Linear(in_size, hidden_sizes[0]))
for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
self.hidden_layers.append(nn.Linear(hin, hout))
self.hidden_layers.apply(init_chainer_default)
self.output = nn.Linear(hidden_sizes[-1], out_size)
else:
self.output = nn.Linear(in_size, out_size)
init_lecun_normal(self.output.weight, scale=last_wscale)
nn.init.zeros_(self.output.bias)
def forward(self, x):
h = x
if self.hidden_sizes:
for layer in self.hidden_layers:
h = self.nonlinearity(layer(h))
return self.output(h)
class StateActionQFunction(object, metaclass=ABCMeta):
"""Abstract Q-function with state and action input."""
@abstractmethod
def __call__(self, x, a):
"""Evaluates Q-function
Args:
x (ndarray): state input
a (ndarray): action input
Returns:
Q-value for state x and action a
"""
raise NotImplementedError()
class FCLateActionSAQFunctionNew(nn.Module, StateActionQFunction):
"""Fully-connected (s,a)-input Q-function with late action input.
Actions are not included until the second hidden layer and not normalized.
This architecture is used in the DDPG paper:
http://arxiv.org/abs/1509.02971
Args:
n_dim_obs (int): Number of dimensions of observation space.
n_dim_action (int): Number of dimensions of action space.
n_hidden_channels (int): Number of hidden channels.
n_hidden_layers (int): Number of hidden layers. It must be greater than
or equal to 1.
nonlinearity (callable): Nonlinearity between layers. It must accept a
Variable as an argument and return a Variable with the same shape.
Nonlinearities with learnable parameters such as PReLU are not
supported.
last_wscale (float): Scale of weight initialization of the last layer.
"""
def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels,
n_hidden_layers, nonlinearity=F.relu, last_wscale=1.0):
assert n_hidden_layers >= 1
self.n_input_channels = n_dim_obs + n_dim_action
self.n_hidden_layers = n_hidden_layers
self.n_hidden_channels = n_hidden_channels
self.nonlinearity = nonlinearity
super().__init__()
self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels,
hidden_sizes=[])
self.mlp = MLP(in_size=n_hidden_channels + n_dim_action, out_size=1,
hidden_sizes=[self.n_hidden_channels] * (self.n_hidden_layers -
1), nonlinearity=nonlinearity, last_wscale=last_wscale)
self.output = self.mlp.output
def forward(self, input_0, input_1):
primals_1 = self.obs_mlp.output.weight
primals_3 = self.obs_mlp.output.bias
primals_5 = self.mlp.output.weight
primals_6 = self.mlp.output.bias
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
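# Smoke-test sketch (illustrative; requires CUDA): same interface as the eager
# module, with the fused cat/ReLU kernel performing the late action concat.
def _demo_q_function_new():
    qf = FCLateActionSAQFunctionNew(n_dim_obs=4, n_dim_action=4,
        n_hidden_channels=4, n_hidden_layers=1).cuda()
    obs = torch.rand(4, 4, device='cuda')
    action = torch.rand(4, 4, device='cuda')
    assert qf(obs, action).shape == (4, 1)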
|
tarokiritani/pfrl
|
FCLateActionSAQFunction
| false | 11,031 |
[
"MIT"
] | 0 |
284ed1f43b32654a2ec1569b16a0f6b9acbd5e79
|
https://github.com/tarokiritani/pfrl/tree/284ed1f43b32654a2ec1569b16a0f6b9acbd5e79
|
conv_head_pooling
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cls_token], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return (buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class conv_head_pooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, conv_type,
padding_mode='zeros', dilation=1):
super(conv_head_pooling, self).__init__()
if conv_type == 'depthwise':
_groups = in_feature
else:
_groups = 1
self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=3,
padding=dilation, dilation=dilation, stride=stride,
padding_mode=padding_mode, groups=_groups)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token):
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
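# A hedged usage sketch (names and shapes below are illustrative assumptions):
# the module returns the pooled feature map alongside the projected class
# token, the same two-output contract exercised by get_inputs() below.
def _example_conv_head_pooling():
    pool = conv_head_pooling(in_feature=4, out_feature=4, stride=1,
        conv_type='depthwise')
    x = torch.rand(4, 4, 4, 4)          # (B, C, H, W) feature map
    cls_token = torch.rand(4, 1, 4)     # (B, 1, C) class token
    x, cls_token = pool(x, cls_token)
    return x.shape, cls_token.shape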
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature': 4, 'out_feature': 4, 'stride': 1,
'conv_type': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
    return (buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        primals_1, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0))
class conv_head_poolingNew(nn.Module):
def __init__(self, in_feature, out_feature, stride, conv_type,
padding_mode='zeros', dilation=1):
super(conv_head_poolingNew, self).__init__()
if conv_type == 'depthwise':
_groups = in_feature
else:
_groups = 1
self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=3,
padding=dilation, dilation=dilation, stride=stride,
padding_mode=padding_mode, groups=_groups)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, input_0, input_1):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.fc.weight
primals_5 = self.fc.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
yasarniyazoglu/d2go
|
conv_head_pooling
| false | 11,032 |
[
"Apache-2.0"
] | 0 |
308c2700c51c70a7a928d99a477b64e856d1ed5e
|
https://github.com/yasarniyazoglu/d2go/tree/308c2700c51c70a7a928d99a477b64e856d1ed5e
|
MultiHeadAttention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ue/cueewzxuy4vxtpvu3vvsk7dj77iejxhffcljrvuqynhlke7j7x72.py
# Topologically Sorted Source Nodes: [truediv, attn], Original ATen: [aten.div, aten.clone]
# Source node to ATen node mapping:
# attn => clone
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_3, 2.0), kwargs = {})
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_div_0 = async_compile.triton('triton_poi_fused_clone_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
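# The index arithmetic `x0 + (4*x2) + (16*x1) + (64*x3)` reads the input as
# (b, len, head, d_k) while writing contiguously as (b, head, len, d_k), so a
# single copy fuses the head transpose with the 1/temperature scale
# (temperature = d_k ** 0.5 = 2.0, hence the 0.5 multiplier). A minimal
# eager-mode sketch of the same fusion (helper name is an assumption):
def _sketch_transpose_and_scale(q4):
    # q4: (B, L, H, Dk) -> (B, H, L, Dk) / temperature, as one contiguous copy
    return (q4.permute(0, 2, 1, 3) / 2.0).contiguous()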
# kernel path: runs/run_shard_8/inductor_cache/ri/cricgdtr5c24l63g746gjtdd45qor3pkzmi7qmyygyd24ejrijb7.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
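# This 2-D tiled copy materialises k^T: it reads (b, len, head, d_k) and
# writes (b, head, d_k, len), so the following bmm computes q @ k.transpose(2, 3)
# with both operands contiguous. An equivalent eager-mode sketch (helper name
# is an assumption):
def _sketch_materialize_k_transposed(k4):
    # k4: (B, L, H, Dk) -> (B, H, Dk, L), contiguous
    return k4.permute(0, 2, 3, 1).contiguous()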
# kernel path: runs/run_shard_8/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
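# Together, _softmax_2 and _softmax_3 implement a numerically stable softmax
# over the trailing dimension of length 4: the first pass stores
# exp(x - max(x)) and the second divides by the row sum, each row's four
# values being re-read via the strided `4*x1` loads. An eager-mode sketch of
# the same two-pass scheme (illustrative only):
def _sketch_two_pass_softmax(attn):
    numer = (attn - attn.amax(dim=-1, keepdim=True)).exp()   # kernel _softmax_2
    return numer / numer.sum(dim=-1, keepdim=True)           # kernel _softmax_3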
# kernel path: runs/run_shard_8/inductor_cache/6b/c6busvilz5nn36jjet3bmw7cqddirh4sgalamjr3fsrp3sbsacfi.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# output => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/s7/cs7p2dyxlesdvuyx4owztmqg5sapsarlgzaivin7okeoe6lxygw7.py
# Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q_5 => var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_19, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_native_layer_norm_5 = async_compile.triton('triton_poi_fused_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
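# This kernel computes the LayerNorm statistics of var_mean(..., correction=0)
# by fully unrolling the 4-element reduction: tmp16 is the mean of the
# residual sum (fc output + residual input) and tmp28 the biased variance.
# An eager-mode sketch of the same statistics (helper name is an assumption):
def _sketch_layernorm_stats(fc_out, residual):
    h = fc_out + residual                       # the tmp2/tmp5/tmp9/tmp13 adds
    var, mean = torch.var_mean(h, dim=-1, keepdim=True, correction=0)
    return mean, var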
# kernel path: runs/run_shard_8/inductor_cache/r7/cr7rb6culc7hicmne2a4ips5pg6g22yb6ww46h6efrp4i2of5v4k.py
# Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# q_5 => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_19, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_8), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_9), kwargs = {})
triton_poi_fused_native_layer_norm_6 = async_compile.triton('triton_poi_fused_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
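# The second LayerNorm kernel consumes those statistics and applies the affine
# transform: (h - mean) * rsqrt(var + 1e-06) * weight + bias, i.e. exactly
# nn.LayerNorm(d_model, eps=1e-06) over the residual sum. Sketch (assumed name):
def _sketch_layernorm_apply(fc_out, residual, mean, var, weight, bias):
    h = fc_out + residual
    return (h - mean) * torch.rsqrt(var + 1e-06) * weight + bias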
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 16), (16, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv, attn], Original ATen: [aten.div, aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_div_0.run(buf0, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf4, 64, 4, grid=grid(64, 4), stream=stream0)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf2, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_7, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_5.run(buf11, primals_1, buf12, buf13, 16, grid=grid(16), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_6.run(buf11, primals_1, buf12, buf13, primals_8, primals_9, buf14, 64, grid=grid(64), stream=stream0)
del buf12
del buf13
del primals_9
return (buf14, buf7, primals_1, primals_8, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), buf11, primals_7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = F.softmax(attn, dim=-1)
output = torch.matmul(attn, v)
return output, attn
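# For reference, the module computes softmax(q k^T / temperature) v, with an
# optional mask that sets disallowed positions to -1e9 before the softmax;
# the multi-head wrapper below uses temperature = sqrt(d_k). A hedged numeric
# sketch (the helper name and shapes are illustrative assumptions):
def _example_sdpa():
    attn_fn = ScaledDotProductAttention(temperature=4 ** 0.5)
    q = k = v = torch.rand(4, 4, 4, 4)      # (B, H, L, Dk)
    output, attn = attn_fn(q, k, v)
    return output.shape, attn.shape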
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.fc(q)
q += residual
q = self.layer_norm(q)
return q, attn
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'd_model': 4, 'd_k': 4, 'd_v': 4}]
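# A hedged end-to-end sketch wiring the helpers above together (illustrative
# only; the helper name is an assumption):
def _example_multi_head_attention():
    mha = MultiHeadAttention(*get_init_inputs()[0], **get_init_inputs()[1])
    q, k, v = get_inputs()
    out, attn = mha(q, k, v)
    return out.shape, attn.shape        # (4, 4, 4) and (4, 4, 4, 4)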
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 16), (16, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(256)](buf0, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(64, 4)](buf1, buf4, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_7, (16, 4), (1, 16), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_5[grid(16)](buf11, primals_1,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_6[grid(64)](buf11, primals_1,
buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_9
    return (buf14, buf7, primals_1, primals_8,
        reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf7,
        reinterpret_tensor(buf10, (16, 16), (16, 1), 0), buf11, primals_7,
        reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0))
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = F.softmax(attn, dim=-1)
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttentionNew(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
def forward(self, input_0, input_1, input_2):
primals_4 = self.w_qs.weight
primals_5 = self.w_ks.weight
primals_6 = self.w_vs.weight
primals_7 = self.fc.weight
primals_8 = self.layer_norm.weight
primals_9 = self.layer_norm.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
yshen47/mvsnerf
|
MultiHeadAttention
| false | 11,033 |
[
"MIT"
] | 0 |
38ab4cf4fc5d025a9ad04e4a801b501ea9a78fb4
|
https://github.com/yshen47/mvsnerf/tree/38ab4cf4fc5d025a9ad04e4a801b501ea9a78fb4
|
topk_PAM_Module
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/tc/ctclgwi3cin26aywwjc7fkzkk6nbkxcv6auyyqi6i7x3vsdwhiar.py
# Topologically Sorted Source Nodes: [at_sparse], Original ATen: [aten.zeros_like]
# Source node to ATen node mapping:
# at_sparse => full_default
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 16, 16], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_zeros_like_1 = async_compile.triton('triton_poi_fused_zeros_like_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_like_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_zeros_like_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/q7/cq7h5t4ljuh2qliygpms7vlxhr5pxr2ckuyuay6shqiktqar27xw.py
# Topologically Sorted Source Nodes: [attention_mask], Original ATen: [aten.scatter]
# Source node to ATen node mapping:
# attention_mask => scatter
# Graph fragment:
# %scatter : [num_users=2] = call_function[target=torch.ops.aten.scatter.value](args = (%full_default, 2, %getitem_1, 1.0), kwargs = {})
triton_poi_fused_scatter_2 = async_compile.triton('triton_poi_fused_scatter_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_scatter_2', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_scatter_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.device_assert(((0 <= tmp0) & (tmp0 < 16)) | ~(xmask), "index out of bounds: 0 <= tmp0 < 16")
tmp2 = 1.0
tl.store(out_ptr0 + (tmp0 + (16*x0)), tmp2, xmask)
''', device_str='cuda')
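# The scatter kernel turns the top-k indices into a one-hot attention mask:
# starting from the zeros tensor produced above, it writes 1.0 at each row's
# selected column (k = 1 in this trace, so `16*x0` addresses row x0 of the
# flattened (4*16, 16) view). An eager-mode sketch of the same masking step
# (the helper name is an assumption; k is a module parameter, fixed to 1 here):
def _sketch_topk_mask(energy, k=1):
    idx = energy.topk(k, dim=2)[1]                     # aten.topk
    mask = torch.zeros_like(energy)                    # aten.zeros_like
    return mask.scatter(2, idx, 1.0)                   # aten.scatter.value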
# kernel path: runs/run_shard_8/inductor_cache/k3/ck3i6xzxv4jjxnxhi4bqh5s7qlenblmijzneqcteclw6pcfcwoyy.py
# Topologically Sorted Source Nodes: [max_1, sub, exp_input, mask_exp_input, sum_1, sum_mask_exp_input, attention], Original ATen: [aten.max, aten.sub, aten.exp, aten.mul, aten.sum, aten.add, aten.div]
# Source node to ATen node mapping:
# attention => div
# exp_input => exp
# mask_exp_input => mul
# max_1 => max_1
# sub => sub
# sum_1 => sum_1
# sum_mask_exp_input => add
# Graph fragment:
# %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%bmm, -1, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %getitem_2), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %scatter), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1], True), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-10), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
triton_per_fused_add_div_exp_max_mul_sub_sum_3 = async_compile.triton('triton_per_fused_add_div_exp_max_mul_sub_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_exp_max_mul_sub_sum_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_exp_max_mul_sub_sum_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl.broadcast_to(rindex, tmp3.shape)
_, tmp13_tmp = triton_helpers.max_with_index(tmp3, tmp14, 1)
tmp13 = tmp13_tmp[:, None]
tmp15 = 1e-10
tmp16 = tmp12 + tmp15
tmp17 = tmp8 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr2 + (r1 + (16*x0)), tmp17, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
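# This persistent-reduction kernel fuses the whole masked softmax for one
# 16-wide row: a max for numerical stability, exp(x - max) multiplied by the
# one-hot mask, the masked row sum plus 1e-10 to avoid dividing by zero where
# the mask empties a row, and the final division. Eager-mode sketch (assumed
# helper name):
def _sketch_masked_softmax(energy, attention_mask, eps=1e-10):
    exp_input = (energy - energy.max(-1, keepdim=True)[0]).exp()
    masked = exp_input * attention_mask
    return masked / (masked.sum(-1, keepdim=True) + eps)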
# kernel path: runs/run_shard_8/inductor_cache/56/c56po7cr4xmblcwkxgeoj2jotja6xqu5mho5rgjjwcw4vvfjnxxs.py
# Topologically Sorted Source Nodes: [mul_1, out_2], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# mul_1 => mul_1
# out_2 => add_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_8, %view_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_1), kwargs = {})
triton_poi_fused_add_mul_4 = async_compile.triton('triton_poi_fused_add_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), out=buf4)
# Topologically Sorted Source Nodes: [proj_value], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [proj_value], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf6, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [topk], Original ATen: [aten.topk]
buf7 = torch.ops.aten.topk.default(buf4, 1, 2, True, False)
buf9 = buf7[1]
del buf7
buf10 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [at_sparse], Original ATen: [aten.zeros_like]
triton_poi_fused_zeros_like_1.run(buf10, 1024, grid=grid(1024), stream=stream0)
buf11 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_mask], Original ATen: [aten.scatter]
triton_poi_fused_zeros_like_1.run(buf11, 1024, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [attention_mask], Original ATen: [aten.scatter]
triton_poi_fused_scatter_2.run(buf9, buf11, 64, grid=grid(64), stream=stream0)
buf13 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
buf15 = empty_strided_cuda((4, 16, 1), (16, 1, 64), torch.float32)
buf14 = buf9; del buf9 # reuse
buf16 = reinterpret_tensor(buf15, (4, 16, 1), (16, 1, 1), 0); del buf15 # reuse
buf17 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, sub, exp_input, mask_exp_input, sum_1, sum_mask_exp_input, attention], Original ATen: [aten.max, aten.sub, aten.exp, aten.mul, aten.sum, aten.add, aten.div]
triton_per_fused_add_div_exp_max_mul_sub_sum_3.run(buf16, buf4, buf11, buf13, buf14, buf17, 64, 16, grid=grid(64), stream=stream0)
buf18 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf17, (4, 16, 16), (256, 1, 16), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, out_2], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_4.run(primals_8, buf18, primals_1, buf19, 256, grid=grid(256), stream=stream0)
return (buf19, primals_1, primals_2, primals_4, primals_6, primals_8, buf4, buf10, buf11, buf13, buf14, buf16, buf17, buf18, reinterpret_tensor(buf6, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.modules.module import Module
def mask_softmax(input, mask=None, dim=-1):
"""Applies a softmax function.
Softmax is defined as:
    :math:`\\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}`
It is applied to all slices along dim, and will re-scale them so that the elements
lie in the range `(0, 1)` and sum to 1.
See :class:`~torch.nn.Softmax` for more details.
Arguments:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
.. note::
This function doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use log_softmax instead (it's faster and has better numerical properties).
"""
if mask is None:
return F.softmax(input, dim=dim, _stacklevel=5)
else:
max_input = input.max(dim=dim, keepdim=True)
exp_input = torch.exp(input - max_input[0])
mask_exp_input = torch.mul(exp_input, mask)
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim, keepdim=True
) + 1e-10
return torch.div(mask_exp_input, sum_mask_exp_input)
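# Illustrative sketch (not part of the original module): the masked softmax
# above renormalizes exp(x - max) over the unmasked positions only, e.g.
#   x = torch.tensor([[1.0, 2.0, 3.0]])
#   m = torch.tensor([[1.0, 0.0, 1.0]])
#   mask_softmax(x, m)  # ~= [[0.119, 0.000, 0.881]]; unmasked entries sum to 1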
def mvmask_softmax(input, mask=None, dim=-1):
"""Applies a softmax function.
Softmax is defined as:
    :math:`\\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}`
It is applied to all slices along dim, and will re-scale them so that the elements
lie in the range `(0, 1)` and sum to 1.
See :class:`~torch.nn.Softmax` for more details.
Arguments:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
.. note::
This function doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use log_softmax instead (it's faster and has better numerical properties).
"""
if mask is None:
return F.softmax(input, dim=dim, _stacklevel=5)
else:
if torch.is_tensor(mask):
return mask_softmax(input, mask=mask, dim=dim)
mask = [mask[0], mask[1]]
N, _H, _W = mask[0].size()
max_input = input.max(dim=dim, keepdim=True)
exp_input = torch.exp(input - max_input[0])
if N == 1:
mask_exp_input = torch.mul(exp_input, mask[0])
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim, keepdim
=True) + 1e-10
return torch.div(mask_exp_input, sum_mask_exp_input)
else:
Sm = 0
for i in range(N):
mask_exp_input = torch.mul(exp_input, mask[0][i])
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim,
keepdim=True) + 1e-10
Sm = Sm + torch.div(mask_exp_input, sum_mask_exp_input)
return torch.mul(Sm, mask[1])
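# Reading of the list-mask branch above (an interpretation, not original
# documentation): mask[0] stacks N per-view masks of shape (N, H, W) and
# mask[1] is a final gating mask; the N individually normalized masked
# softmaxes are summed, then multiplied element-wise by mask[1].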
class Mask_Softmax(Module):
"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range (0,1) and sum to 1
Softmax is defined as:
.. math::
\\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}
Shape:
- Input: any shape
- Output: same as input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Arguments:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
.. note::
This module doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use `LogSoftmax` instead (it's faster and has better numerical properties).
    Examples::
        >>> m = Mask_Softmax(dim=1)
        >>> input = torch.randn(2, 3)
        >>> output = m([input, None])  # a None mask falls back to F.softmax
"""
def __init__(self, dim=None):
super(Mask_Softmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input):
return mvmask_softmax(input[0], input[1], self.dim)
class topk_PAM_Module(nn.Module):
""" Position attention module"""
def __init__(self, in_dim, key_dim, out_dim, topk=10):
super(topk_PAM_Module, self).__init__()
self.chanel_in = in_dim
self.topk = topk
self.key_channels = key_dim
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=
key_dim, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=key_dim,
kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=
out_dim, kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = Mask_Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X (HxW) X (HxW)
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width * height
).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)
energy = torch.bmm(proj_query, proj_key)
proj_value = self.value_conv(x)
proj_value = proj_value.view(m_batchsize, -1, width * height)
_val, idx = torch.topk(energy, height * width // self.topk, dim=2,
largest=True, sorted=False)
at_sparse = torch.zeros_like(energy)
attention_mask = at_sparse.scatter_(2, idx, 1.0)
attention = self.softmax([energy, attention_mask])
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
out = self.gamma * out + x
return out
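# Illustrative usage sketch (shapes mirror get_inputs/get_init_inputs below):
#   pam = topk_PAM_Module(in_dim=4, key_dim=4, out_dim=4, topk=10)
#   y = pam(torch.rand(4, 4, 4, 4))  # output keeps the input shape (B, C, H, W)
# With H * W = 16 and topk = 10, each attention row keeps 16 // 10 = 1 entry,
# matching the k = 1 passed to aten.topk in the compiled call() above.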
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'key_dim': 4, 'out_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
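    # In-place bias add after a 1x1 convolution; channel index = xindex // 16 % 4.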
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_zeros_like_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
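    # Zero-fill a 1024-element buffer (zeros_like(energy) for the attention mask).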
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_scatter_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
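    # Write 1.0 at each row's top-k column index (k == 1 for this input size),
    # turning the zero buffer into the sparse attention mask.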
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.device_assert((0 <= tmp0) & (tmp0 < 16) | ~xmask,
'index out of bounds: 0 <= tmp0 < 16')
tmp2 = 1.0
tl.store(out_ptr0 + (tmp0 + 16 * x0), tmp2, xmask)
@triton.jit
def triton_per_fused_add_div_exp_max_mul_sub_sum_3(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr
):
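    # Fused masked softmax along the last dim: row max (and argmax), exp(x - max),
    # multiply by the 0/1 mask, row-sum + 1e-10, then divide.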
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl.broadcast_to(rindex, tmp3.shape)
_, tmp13_tmp = triton_helpers.max_with_index(tmp3, tmp14, 1)
tmp13 = tmp13_tmp[:, None]
tmp15 = 1e-10
tmp16 = tmp12 + tmp15
tmp17 = tmp8 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp17, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
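    # Residual output: out = gamma (the scalar at in_ptr0) * attention_out + x.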
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 4), (64, 1, 16),
0), reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), out=buf4)
buf5 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_0[grid(256)](buf6, primals_7, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf7 = torch.ops.aten.topk.default(buf4, 1, 2, True, False)
buf9 = buf7[1]
del buf7
buf10 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_poi_fused_zeros_like_1[grid(1024)](buf10, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_poi_fused_zeros_like_1[grid(1024)](buf11, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
triton_poi_fused_scatter_2[grid(64)](buf9, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
buf15 = empty_strided_cuda((4, 16, 1), (16, 1, 64), torch.float32)
buf14 = buf9
del buf9
buf16 = reinterpret_tensor(buf15, (4, 16, 1), (16, 1, 1), 0)
del buf15
buf17 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_per_fused_add_div_exp_max_mul_sub_sum_3[grid(64)](buf16,
buf4, buf11, buf13, buf14, buf17, 64, 16, XBLOCK=32, num_warps=
4, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 16), (64, 16, 1),
0), reinterpret_tensor(buf17, (4, 16, 16), (256, 1, 16), 0),
out=buf18)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_4[grid(256)](primals_8, buf18, primals_1,
buf19, 256, XBLOCK=128, num_warps=4, num_stages=1)
return (buf19, primals_1, primals_2, primals_4, primals_6, primals_8,
buf4, buf10, buf11, buf13, buf14, buf16, buf17, buf18,
reinterpret_tensor(buf6, (4, 16, 4), (64, 1, 16), 0),
reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0),
reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0))
def mask_softmax(input, mask=None, dim=-1):
"""Applies a softmax function.
Softmax is defined as:
    :math:`\\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}`
It is applied to all slices along dim, and will re-scale them so that the elements
lie in the range `(0, 1)` and sum to 1.
See :class:`~torch.nn.Softmax` for more details.
Arguments:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
.. note::
This function doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use log_softmax instead (it's faster and has better numerical properties).
"""
if mask is None:
return F.softmax(input, dim=dim, _stacklevel=5)
else:
max_input = input.max(dim=dim, keepdim=True)
exp_input = torch.exp(input - max_input[0])
mask_exp_input = torch.mul(exp_input, mask)
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim, keepdim=True
) + 1e-10
return torch.div(mask_exp_input, sum_mask_exp_input)
def mvmask_softmax(input, mask=None, dim=-1):
"""Applies a softmax function.
Softmax is defined as:
    :math:`\\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}`
It is applied to all slices along dim, and will re-scale them so that the elements
lie in the range `(0, 1)` and sum to 1.
See :class:`~torch.nn.Softmax` for more details.
Arguments:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
.. note::
This function doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use log_softmax instead (it's faster and has better numerical properties).
"""
if mask is None:
return F.softmax(input, dim=dim, _stacklevel=5)
else:
if torch.is_tensor(mask):
return mask_softmax(input, mask=mask, dim=dim)
mask = [mask[0], mask[1]]
N, _H, _W = mask[0].size()
max_input = input.max(dim=dim, keepdim=True)
exp_input = torch.exp(input - max_input[0])
if N == 1:
mask_exp_input = torch.mul(exp_input, mask[0])
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim, keepdim
=True) + 1e-10
return torch.div(mask_exp_input, sum_mask_exp_input)
else:
Sm = 0
for i in range(N):
mask_exp_input = torch.mul(exp_input, mask[0][i])
sum_mask_exp_input = torch.sum(mask_exp_input, dim=dim,
keepdim=True) + 1e-10
Sm = Sm + torch.div(mask_exp_input, sum_mask_exp_input)
return torch.mul(Sm, mask[1])
class Mask_Softmax(Module):
"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range (0,1) and sum to 1
Softmax is defined as:
.. math::
\\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}
Shape:
- Input: any shape
- Output: same as input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Arguments:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
.. note::
This module doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use `LogSoftmax` instead (it's faster and has better numerical properties).
    Examples::
        >>> m = Mask_Softmax(dim=1)
        >>> input = torch.randn(2, 3)
        >>> output = m([input, None])  # a None mask falls back to F.softmax
"""
def __init__(self, dim=None):
super(Mask_Softmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input):
return mvmask_softmax(input[0], input[1], self.dim)
class topk_PAM_ModuleNew(nn.Module):
""" Position attention module"""
def __init__(self, in_dim, key_dim, out_dim, topk=10):
super(topk_PAM_ModuleNew, self).__init__()
self.chanel_in = in_dim
self.topk = topk
self.key_channels = key_dim
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=
key_dim, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=key_dim,
kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=
out_dim, kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = Mask_Softmax(dim=-1)
def forward(self, input_0):
primals_8 = self.gamma
primals_2 = self.query_conv.weight
primals_3 = self.query_conv.bias
primals_4 = self.key_conv.weight
primals_5 = self.key_conv.bias
primals_6 = self.value_conv.weight
primals_7 = self.value_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
yougoforward/OCNet1931
|
topk_PAM_Module
| false | 11,034 |
[
"MIT"
] | 0 |
e679e9f248aff2f06e1d983e4e30230e5fc5174f
|
https://github.com/yougoforward/OCNet1931/tree/e679e9f248aff2f06e1d983e4e30230e5fc5174f
|
Classifier
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/kn/ckn2syfaxwp4ztr62byudg2w3sr3vpb73ohe73jlv2tii6s6aj2v.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# x_4 => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 408
x2 = (xindex // 1632)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (1632*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (408 + x0 + (1632*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (816 + x0 + (1632*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1224 + x0 + (1632*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mw/cmw5idtkljb5udzozaozopfkxrnlgn4fdddbwb4qtweauenliupq.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# x_4 => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 408
x2 = (xindex // 1632)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (1632*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (408 + x0 + (1632*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (816 + x0 + (1632*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1224 + x0 + (1632*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (102, 4), (4, 1))
assert_size_stride(primals_5, (102, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 102), (102, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 102), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 102), (1632, 408, 102, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf2, buf3, 6528, grid=grid(6528), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 102), (1632, 408, 102, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf3, buf4, 6528, grid=grid(6528), stream=stream0)
del buf3
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((102, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((102, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Classifier(nn.Module):
def __init__(self, inputs, hidden_units):
super().__init__()
self.hidden = nn.Linear(inputs, hidden_units)
self.output = nn.Linear(hidden_units, 102)
self.dropout = nn.Dropout(p=0.2)
def forward(self, x):
x = self.hidden(x)
x = F.relu(x)
x = self.dropout(x)
x = self.output(x)
x = F.log_softmax(x, dim=1)
return x
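# Illustrative usage sketch (shapes follow get_inputs/get_init_inputs below):
#   clf = Classifier(inputs=4, hidden_units=4)
#   out = clf(torch.rand(4, 4, 4, 4))  # log-probabilities, shape (4, 4, 4, 102)
# Note that log_softmax is applied over dim=1 (size 4), not the 102-way class
# dimension; the compiled kernels below accordingly reduce over the four
# slices at stride 408.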
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inputs': 4, 'hidden_units': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
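    # Fused bias add + ReLU; also stores the (activation <= 0) mask consumed by
    # threshold_backward in the backward pass.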
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
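    # log_softmax step 1: subtract the max over dim 1 (the four slices at
    # stride 408) for numerical stability.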
xnumel = 6528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 408
x2 = xindex // 1632
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 1632 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (408 + x0 + 1632 * x2), xmask, eviction_policy
='evict_last')
tmp4 = tl.load(in_ptr0 + (816 + x0 + 1632 * x2), xmask, eviction_policy
='evict_last')
tmp6 = tl.load(in_ptr0 + (1224 + x0 + 1632 * x2), xmask,
eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
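    # log_softmax step 2: subtract log(sum(exp(...))) over the same four slices.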
xnumel = 6528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 408
x2 = xindex // 1632
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 1632 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (408 + x0 + 1632 * x2), xmask, eviction_policy
='evict_last')
tmp6 = tl.load(in_ptr0 + (816 + x0 + 1632 * x2), xmask, eviction_policy
='evict_last')
tmp9 = tl.load(in_ptr0 + (1224 + x0 + 1632 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (102, 4), (4, 1))
assert_size_stride(primals_5, (102,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 102), (102, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 102), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 102), (1632, 408, 102, 1),
torch.float32)
triton_poi_fused__log_softmax_1[grid(6528)](buf2, buf3, 6528,
XBLOCK=256, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 102), (1632, 408, 102, 1), 0)
del buf2
triton_poi_fused__log_softmax_2[grid(6528)](buf3, buf4, 6528,
XBLOCK=256, num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5
class ClassifierNew(nn.Module):
def __init__(self, inputs, hidden_units):
super().__init__()
self.hidden = nn.Linear(inputs, hidden_units)
self.output = nn.Linear(hidden_units, 102)
self.dropout = nn.Dropout(p=0.2)
def forward(self, input_0):
primals_1 = self.hidden.weight
primals_2 = self.hidden.bias
primals_4 = self.output.weight
primals_5 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
zamerman/Udacity-AI-Programming
|
Classifier
| false | 11,035 |
[
"MIT"
] | 0 |
6537f273fb00531d448330c1c85886d86e1161d2
|
https://github.com/zamerman/Udacity-AI-Programming/tree/6537f273fb00531d448330c1c85886d86e1161d2
|
UnaryBlock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/e6/ce6tqme72afv5izixrlqsb4h23mpercfe7y5zvnsisyh2oc2duiq.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_3 => add, clone, rsqrt, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [0, 2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused__native_batch_norm_legit_clone_0 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_clone_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5q/c5qbi4chxnbs53eylw22fbsfbunx4fnrutx3avt4ruggohxocntf.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_6 => gt, mul_1, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze_2, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, 0.1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze_2, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + (4*y0)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.1
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (y0 + (4*x1)), tmp9, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone, aten._native_batch_norm_legit]
stream0 = get_raw_stream(0)
triton_poi_fused__native_batch_norm_legit_clone_0.run(buf0, buf1, buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf0, buf1, buf2, buf3, 4, 4, grid=grid(4, 4), stream=stream0)
del buf1
del buf2
return (buf3, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
import torch.nn as nn
from torch.nn.parameter import Parameter
class BatchNormBlock(nn.Module):
def __init__(self, in_dim, use_bn, bn_momentum):
"""
        Initialize a batch normalization block. If the network does not use batch normalization, the block falls back to a learnable bias.
        :param in_dim: dimension of input features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(BatchNormBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.in_dim = in_dim
if self.use_bn:
self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
else:
self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32),
requires_grad=True)
return
def reset_parameters(self):
nn.init.zeros_(self.bias)
def forward(self, x):
if self.use_bn:
x = x.unsqueeze(2)
x = x.transpose(0, 2)
x = self.batch_norm(x)
x = x.transpose(0, 2)
return x.squeeze()
else:
return x + self.bias
def __repr__(self):
return (
'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'
.format(self.in_dim, self.bn_momentum, str(not self.use_bn)))
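# Shape note (derived from the use_bn branch above): a 2D input of shape (B, C)
# becomes (1, C, B) via unsqueeze(2) + transpose(0, 2), so InstanceNorm1d
# computes per-feature statistics over the B samples; the compiled
# triton_poi_fused__native_batch_norm_legit_clone_0 kernel mirrors this.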
class UnaryBlock(nn.Module):
def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
"""
Initialize a standard unary block with its ReLU and BatchNorm.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(UnaryBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.no_relu = no_relu
self.in_dim = in_dim
self.out_dim = out_dim
self.mlp = nn.Linear(in_dim, out_dim, bias=False)
self.batch_norm = BatchNormBlock(out_dim, self.use_bn, self.bn_momentum
)
if not no_relu:
self.leaky_relu = nn.LeakyReLU(0.1)
return
def forward(self, x, batch=None):
x = self.mlp(x)
x = self.batch_norm(x)
if not self.no_relu:
x = self.leaky_relu(x)
return x
def __repr__(self):
return (
'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'
.format(self.in_dim, self.out_dim, str(self.use_bn), str(not
self.no_relu)))
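# Illustrative usage sketch (get_init_inputs below passes 4 for use_bn and
# bn_momentum; any truthy use_bn selects the InstanceNorm branch):
#   blk = UnaryBlock(in_dim=4, out_dim=4, use_bn=True, bn_momentum=0.1)
#   y = blk(torch.rand(4, 4))  # Linear -> norm -> LeakyReLU(0.1), shape (4, 4)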
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'use_bn': 4, 'bn_momentum': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__native_batch_norm_legit_clone_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
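    # Instance-norm statistics: per-feature mean and rsqrt(var + 1e-05), reduced
    # over the four samples (rows) for each feature (column).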
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
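    # Normalize ((x - mean) * rsqrt) and apply LeakyReLU(0.1); the result is
    # written to a column-major (stride (1, 4)) output buffer.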
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.1
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (y0 + 4 * x1), tmp9, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__native_batch_norm_legit_clone_0[grid(4)](buf0,
buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_leaky_relu_1[grid(4, 4)](buf0, buf1, buf2, buf3, 4,
4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
del buf1
del buf2
return buf3, primals_2, buf0
class BatchNormBlock(nn.Module):
def __init__(self, in_dim, use_bn, bn_momentum):
"""
        Initialize a batch normalization block. If the network does not use batch normalization, this block replaces it with a learnable bias.
        :param in_dim: dimension of input features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(BatchNormBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.in_dim = in_dim
if self.use_bn:
self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
else:
self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32),
requires_grad=True)
return
def reset_parameters(self):
nn.init.zeros_(self.bias)
def forward(self, x):
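        # With use_bn, [N, C] features are reshaped to [N, C, 1] and then
        # transposed to [1, C, N], so InstanceNorm1d normalizes each channel
        # over all N points; the inverse ops restore the [N, C] layout.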
if self.use_bn:
x = x.unsqueeze(2)
x = x.transpose(0, 2)
x = self.batch_norm(x)
x = x.transpose(0, 2)
return x.squeeze()
else:
return x + self.bias
def __repr__(self):
return (
'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'
.format(self.in_dim, self.bn_momentum, str(not self.use_bn)))
class UnaryBlockNew(nn.Module):
def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
"""
Initialize a standard unary block with its ReLU and BatchNorm.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(UnaryBlockNew, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.no_relu = no_relu
self.in_dim = in_dim
self.out_dim = out_dim
self.mlp = nn.Linear(in_dim, out_dim, bias=False)
        self.batch_norm = BatchNormBlock(out_dim, self.use_bn, self.bn_momentum)
if not no_relu:
self.leaky_relu = nn.LeakyReLU(0.1)
return
def __repr__(self):
        return ('UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'
                .format(self.in_dim, self.out_dim, str(self.use_bn),
                        str(not self.no_relu)))
def forward(self, input_0):
primals_1 = self.mlp.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
wuxingzhe/OverPredactor
|
UnaryBlock
| false | 11,036 |
[
"MIT"
] | 0 |
3a0965f4c3fc84ec0dcba555ec7c460f265d9143
|
https://github.com/wuxingzhe/OverPredactor/tree/3a0965f4c3fc84ec0dcba555ec7c460f265d9143
|
Encoder
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/wv/cwvsxo4q6wyoxpozsubbimmg6xvl34ow44hy6yl5mwa23uuy77sa.py
# Topologically Sorted Source Nodes: [mixture_w], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# mixture_w => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), primals_2, stride=(2,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1), (4, 1, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [mixture_w], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, buf2, 16, grid=grid(16), stream=stream0)
return (buf1, primals_2, reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
"""Estimation of the nonnegative mixture weight by a 1-D conv layer.
"""
def __init__(self, L, N):
super(Encoder, self).__init__()
self.L, self.N = L, N
        self.conv1d_U = nn.Conv1d(1, N, kernel_size=L, stride=L // 2,
                                  bias=False)
def forward(self, mixture):
"""
Args:
mixture: [M, T], M is batch size, T is #samples
Returns:
mixture_w: [M, N, K], where K = (T-L)/(L/2)+1 = 2T/L-1
"""
mixture = torch.unsqueeze(mixture, 1)
mixture_w = F.relu(self.conv1d_U(mixture))
return mixture_w
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'L': 4, 'N': 4}]
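# Worked shape check (illustrative, not from the original repo): with L=4,
# N=4 and a [4, 4] input (T=4), K = (T - L) / (L / 2) + 1 = 2T/L - 1 = 1, so
# the output is [M, N, K] = [4, 4, 1], matching buf0 in the compiled wrapper
# below.
if __name__ == '__main__':
    enc = Encoder(L=4, N=4)
    print(enc(torch.rand(4, 4)).shape)  # torch.Size([4, 4, 1])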
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4), (4, 4, 1), 0), primals_2, stride=(2,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (4, 4, 1), (4, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf1, primals_2, reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
1), 0), buf2
class EncoderNew(nn.Module):
"""Estimation of the nonnegative mixture weight by a 1-D conv layer.
"""
def __init__(self, L, N):
super(EncoderNew, self).__init__()
self.L, self.N = L, N
        self.conv1d_U = nn.Conv1d(1, N, kernel_size=L, stride=L // 2,
                                  bias=False)
def forward(self, input_0):
primals_2 = self.conv1d_U.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
zhangxinaaaa/Conv-TasNet
|
Encoder
| false | 11,037 |
[
"MIT"
] | 0 |
4622d93d0b9dbe23584addd4f4b9463255651652
|
https://github.com/zhangxinaaaa/Conv-TasNet/tree/4622d93d0b9dbe23584addd4f4b9463255651652
|
Conv2d_dilated
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/jw/cjw2limywkgcxcmakwde6yhejpgbx3f2uhb57pqab2yabyayyfay.py
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# input_1 => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 1, 0, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 5) % 5
x0 = xindex % 5
x2 = (xindex // 25)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ac/cac3fsg2etmtbrojzasevhhi6adzjv3vdon2r6addczf27vwsi2g.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 400, grid=grid(400), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf2, primals_3, buf3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
def same_padding_length(input_length, filter_size, stride, dilation=1):
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
output_length = (input_length + stride - 1) // stride
pad_length = max(0, (output_length - 1) * stride + dilated_filter_size -
input_length)
return pad_length
def compute_same_padding2d(input_shape, kernel_size, strides, dilation):
space = input_shape[2:]
assert len(space) == 2, '{}'.format(space)
new_space = []
new_input = []
for i in range(len(space)):
        pad_length = same_padding_length(space[i], kernel_size[i],
                                         stride=strides[i],
                                         dilation=dilation[i])
new_space.append(pad_length)
new_input.append(pad_length % 2)
return tuple(new_space), tuple(new_input)
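# Worked example (illustrative, mirroring the 4x4 inputs used by get_inputs
# below): for spatial size 4, kernel 4, stride 1, dilation 1,
# output_length = (4 + 1 - 1) // 1 = 4 and pad_length =
# max(0, (4 - 1) * 1 + 4 - 4) = 3; since 3 % 2 == 1, compute_same_padding2d
# returns ((3, 3), (1, 1)): the caller pads one extra row/column up front and
# splits the remaining padding symmetrically inside F.conv2d.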
class _Conv2d_dilated(_ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
dilation = _pair(dilation)
super(_Conv2d_dilated, self).__init__(in_channels, out_channels,
kernel_size, stride, _pair(0), dilation, False, _pair(0),
groups, bias, padding_mode='zeros')
def forward(self, input, dilation=None):
input_shape = list(input.size())
dilation_rate = self.dilation if dilation is None else _pair(dilation)
        padding, pad_input = compute_same_padding2d(input_shape,
            kernel_size=self.kernel_size, strides=self.stride,
            dilation=dilation_rate)
if pad_input[0] == 1 or pad_input[1] == 1:
input = F.pad(input, [0, int(pad_input[0]), 0, int(pad_input[1])])
        return F.conv2d(input, self.weight, self.bias, self.stride,
                        (padding[0] // 2, padding[1] // 2),
                        dilation_rate, self.groups)
class Conv2d_dilated(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 NL='relu', same_padding=False, dilation=1, bn=False,
                 bias=True, groups=1):
super(Conv2d_dilated, self).__init__()
self.conv = _Conv2d_dilated(in_channels, out_channels, kernel_size,
stride, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
affine=True) if bn else None
if NL == 'relu':
self.relu = nn.ReLU(inplace=True)
elif NL == 'prelu':
self.relu = nn.PReLU()
elif NL == 'tanh':
self.relu = nn.Tanh()
elif NL == 'lrelu':
self.relu = nn.LeakyReLU(inplace=True)
elif NL == 'sigmoid':
self.relu = nn.Sigmoid()
else:
self.relu = None
def forward(self, x, dilation=None):
x = self.conv(x, dilation)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
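# Illustrative usage sketch (assumed, not from the original repo): the
# same-style padding keeps a 4x4 input at 4x4 for a 4x4 kernel at stride 1
# (one extra row/column via F.pad, then padding (1, 1) inside F.conv2d).
if __name__ == '__main__':
    layer = Conv2d_dilated(4, 4, kernel_size=4)
    print(layer(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 4, 4])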
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 5 % 5
x0 = xindex % 5
x2 = xindex // 25
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(400)](primals_1, buf0, 400,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(256)](buf2,
primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0, buf3
def same_padding_length(input_length, filter_size, stride, dilation=1):
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
output_length = (input_length + stride - 1) // stride
pad_length = max(0, (output_length - 1) * stride + dilated_filter_size -
input_length)
return pad_length
def compute_same_padding2d(input_shape, kernel_size, strides, dilation):
space = input_shape[2:]
assert len(space) == 2, '{}'.format(space)
new_space = []
new_input = []
for i in range(len(space)):
        pad_length = same_padding_length(space[i], kernel_size[i],
                                         stride=strides[i],
                                         dilation=dilation[i])
new_space.append(pad_length)
new_input.append(pad_length % 2)
return tuple(new_space), tuple(new_input)
class _Conv2d_dilated(_ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
dilation = _pair(dilation)
super(_Conv2d_dilated, self).__init__(in_channels, out_channels,
kernel_size, stride, _pair(0), dilation, False, _pair(0),
groups, bias, padding_mode='zeros')
def forward(self, input, dilation=None):
input_shape = list(input.size())
dilation_rate = self.dilation if dilation is None else _pair(dilation)
        padding, pad_input = compute_same_padding2d(input_shape,
            kernel_size=self.kernel_size, strides=self.stride,
            dilation=dilation_rate)
if pad_input[0] == 1 or pad_input[1] == 1:
input = F.pad(input, [0, int(pad_input[0]), 0, int(pad_input[1])])
        return F.conv2d(input, self.weight, self.bias, self.stride,
                        (padding[0] // 2, padding[1] // 2),
                        dilation_rate, self.groups)
class Conv2d_dilatedNew(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 NL='relu', same_padding=False, dilation=1, bn=False,
                 bias=True, groups=1):
super(Conv2d_dilatedNew, self).__init__()
self.conv = _Conv2d_dilated(in_channels, out_channels, kernel_size,
stride, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
affine=True) if bn else None
if NL == 'relu':
self.relu = nn.ReLU(inplace=True)
elif NL == 'prelu':
self.relu = nn.PReLU()
elif NL == 'tanh':
self.relu = nn.Tanh()
elif NL == 'lrelu':
self.relu = nn.LeakyReLU(inplace=True)
elif NL == 'sigmoid':
self.relu = nn.Sigmoid()
else:
self.relu = None
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
xwjBupt/Counting-ICCV-DSSINet
|
Conv2d_dilated
| false | 11,038 |
[
"MIT"
] | 0 |
92e4c56c93572fb2b026d573c3e711ce85a4af8f
|
https://github.com/xwjBupt/Counting-ICCV-DSSINet/tree/92e4c56c93572fb2b026d573c3e711ce85a4af8f
|
DiceLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/wb/cwbzileksayxr6wz23lzdor7mw4rry2y5br742wp5wkskmqwzpjw.py
# Topologically Sorted Source Nodes: [mul, intersect, add, denominator], Original ATen: [aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# add => add
# denominator => sum_2
# intersect => sum_1
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [-1]), kwargs = {})
triton_per_fused_add_mul_sum_0 = async_compile.triton('triton_per_fused_add_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((16*x0) + (64*(r1 // 16)) + (r1 % 16)), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + ((16*x0) + (64*(r1 // 16)) + (r1 % 16)), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp1 + tmp2
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp7, xmask)
tl.store(out_ptr1 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5i/c5igbunkwt5rtqc5m4objcih3pcqb6hetejxaqtxaz6ll2xxrvjb.py
# Topologically Sorted Source Nodes: [mul_1, clamp, per_channel_dice, sub, mean], Original ATen: [aten.mul, aten.clamp, aten.div, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# clamp => clamp_min
# mean => mean
# mul_1 => mul_1
# per_channel_dice => div
# sub => sub
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_2, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %clamp_min), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
triton_per_fused_clamp_div_mean_mul_rsub_1 = async_compile.triton('triton_per_fused_clamp_div_mean_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_div_mean_mul_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = 1e-05
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 / tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul, intersect, add, denominator], Original ATen: [aten.mul, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_mul_sum_0.run(arg0_1, arg1_1, buf0, buf1, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [mul_1, clamp, per_channel_dice, sub, mean], Original ATen: [aten.mul, aten.clamp, aten.div, aten.rsub, aten.mean]
triton_per_fused_clamp_div_mean_mul_rsub_1.run(buf3, buf0, buf1, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
from torch.autograd import Variable
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
transposed = tensor.permute(axis_order)
return transposed.contiguous().view(C, -1)
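# Illustrative shape example (not from the original source): a [2, 3, 4, 4]
# tensor is permuted to [3, 2, 4, 4] and viewed as [C, N * H * W] = [3, 32].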
def compute_per_channel_dice(input, target, epsilon=1e-05,
                             ignore_index=None, weight=None):
assert input.size() == target.size(
), "'input' and 'target' must have the same shape"
target = target.float()
if ignore_index is not None:
mask = target.clone().ne_(ignore_index)
mask.requires_grad = False
input = input * mask
target = target * mask
input = flatten(input)
target = flatten(target)
intersect = (input * target).sum(-1)
if weight is not None:
intersect = weight * intersect
denominator = (input + target).sum(-1)
return 2.0 * intersect / denominator.clamp(min=epsilon)
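# The formula above in symbols: per channel c,
#     dice_c = 2 * sum(input_c * target_c) / max(sum(input_c + target_c), epsilon)
# For example, input_c = [1, 1, 0] and target_c = [1, 0, 0] give
# 2 * 1 / 3 ~= 0.667.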
class DiceLoss(nn.Module):
"""Computes Dice Loss, which just 1 - DiceCoefficient described above.
Additionally allows per-class weights to be provided.
"""
def __init__(self, epsilon=1e-05, weight=None, ignore_index=None,
sigmoid_normalization=True, skip_last_target=False):
super(DiceLoss, self).__init__()
self.epsilon = epsilon
self.register_buffer('weight', weight)
self.ignore_index = ignore_index
if sigmoid_normalization:
self.normalization = nn.Sigmoid()
else:
self.normalization = nn.Softmax(dim=1)
self.skip_last_target = skip_last_target
def forward(self, input, target):
input = self.normalization(input)
if self.weight is not None:
weight = Variable(self.weight, requires_grad=False)
else:
weight = None
if self.skip_last_target:
target = target[:, :-1, ...]
per_channel_dice = compute_per_channel_dice(input, target, epsilon=
self.epsilon, ignore_index=self.ignore_index, weight=weight)
return torch.mean(1.0 - per_channel_dice)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
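# Minimal usage sketch (illustrative, not from the original repo): raw logits
# are passed in; the module applies the sigmoid normalization itself.
if __name__ == '__main__':
    criterion = DiceLoss()
    loss = criterion(torch.randn(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
    print(loss.item())  # scalar: mean of (1 - per-channel dice)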
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
other=0.0)
tmp2 = tl.load(in_ptr1 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tmp1 + tmp2
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
@triton.jit
def triton_per_fused_clamp_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = 1e-05
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp2 / tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_clamp_div_mean_mul_rsub_1[grid(1)](buf3, buf0,
buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
transposed = tensor.permute(axis_order)
return transposed.contiguous().view(C, -1)
def compute_per_channel_dice(input, target, epsilon=1e-05,
                             ignore_index=None, weight=None):
assert input.size() == target.size(
), "'input' and 'target' must have the same shape"
target = target.float()
if ignore_index is not None:
mask = target.clone().ne_(ignore_index)
mask.requires_grad = False
input = input * mask
target = target * mask
input = flatten(input)
target = flatten(target)
intersect = (input * target).sum(-1)
if weight is not None:
intersect = weight * intersect
denominator = (input + target).sum(-1)
return 2.0 * intersect / denominator.clamp(min=epsilon)
class DiceLossNew(nn.Module):
"""Computes Dice Loss, which just 1 - DiceCoefficient described above.
Additionally allows per-class weights to be provided.
"""
def __init__(self, epsilon=1e-05, weight=None, ignore_index=None,
sigmoid_normalization=True, skip_last_target=False):
super(DiceLossNew, self).__init__()
self.epsilon = epsilon
self.register_buffer('weight', weight)
self.ignore_index = ignore_index
if sigmoid_normalization:
self.normalization = nn.Sigmoid()
else:
self.normalization = nn.Softmax(dim=1)
self.skip_last_target = skip_last_target
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
zzz123xyz/pytorch-3dunet
|
DiceLoss
| false | 11,039 |
[
"MIT"
] | 0 |
5bab6968b23cff5c6ae456b343285bfa9b104294
|
https://github.com/zzz123xyz/pytorch-3dunet/tree/5bab6968b23cff5c6ae456b343285bfa9b104294
|
RPN
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [conv2d, conv], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv => relu
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ua/cuamkt6tfcfi2wcpcuttcy4xxesdettzb253463drt7imw5jbzww.py
# Topologically Sorted Source Nodes: [bbox_deltas], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# bbox_deltas => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/l5/cl5ngie4yxsggu5cyzj42gk3bmbc7j54csslnagp2yuclhz5yv4s.py
# Topologically Sorted Source Nodes: [cls_scores], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# cls_scores => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ap/capyi6zibistdca7cf2xdwmkwijye7fm2s4ia75hl7baus2ua5dc.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 64
x2 = (xindex // 128)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (128*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0 + (128*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x3), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, conv], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [cls_scores], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 4, 4), (128, 16, 4, 1))
# Topologically Sorted Source Nodes: [bbox_deltas], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 4, 4), (256, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [bbox_deltas], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf4, primals_7, 1024, grid=grid(1024), stream=stream0)
del primals_7
buf5 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [cls_scores], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 512, grid=grid(512), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 2, 4, 4, 4), (128, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf5, buf6, 512, grid=grid(512), stream=stream0)
return (buf4, reinterpret_tensor(buf5, (4, 2, 4, 4, 4), (128, 64, 16, 4, 1), 0), reinterpret_tensor(buf6, (4, 4, 4, 4), (128, 16, 4, 1), 64), primals_1, primals_3, primals_4, primals_6, buf1, reinterpret_tensor(buf5, (4, 2, 4, 4, 4), (128, 64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
def conv(in_channels, out_channels, kernel_size=3, padding=1, bn=True,
dilation=1, stride=1, relu=True, bias=True):
modules = [nn.Conv2d(in_channels, out_channels, kernel_size, stride,
padding, dilation, bias=bias)]
if bn:
modules.append(nn.BatchNorm2d(out_channels))
if relu:
modules.append(nn.ReLU(inplace=True))
return nn.Sequential(*modules)
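# Illustrative sketch (not part of the original source): conv() stacks
# Conv2d -> BatchNorm2d -> ReLU, and the default 3x3 kernel with padding=1
# preserves spatial size. A minimal usage, assuming only the definitions above:
def _conv_helper_example():
    block = conv(4, 8)  # Sequential(Conv2d, BatchNorm2d, ReLU)
    y = block(torch.rand(2, 4, 16, 16))
    assert y.shape == (2, 8, 16, 16)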
class RPN(nn.Module):
def __init__(self, dim_in, dim_internal, priors_num,
class_activation_mode='softmax'):
super().__init__()
assert class_activation_mode in ('softmax', 'sigmoid')
self.dim_in = dim_in
self.dim_internal = dim_internal
self.priors_num = priors_num
self.dim_score = (priors_num * 2 if class_activation_mode ==
'softmax' else priors_num)
self.class_activation_mode = class_activation_mode
self.conv = nn.Conv2d(dim_in, dim_internal, 3, 1, 1)
self.cls_score = nn.Conv2d(dim_internal, self.dim_score, 1, 1, 0)
self.bbox_deltas = nn.Conv2d(dim_internal, 4 * priors_num, 1, 1, 0)
self.init_weights()
def init_weights(self):
nn.init.normal_(self.conv.weight, std=0.01)
nn.init.constant_(self.conv.bias, 0)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.constant_(self.cls_score.bias, 0)
nn.init.normal_(self.bbox_deltas.weight, std=0.01)
nn.init.constant_(self.bbox_deltas.bias, 0)
def forward(self, x):
conv = F.relu(self.conv(x), inplace=True)
cls_scores = self.cls_score(conv)
bbox_deltas = self.bbox_deltas(conv)
if self.class_activation_mode == 'softmax':
b, _c, h, w = cls_scores.shape
cls_scores = cls_scores.view(b, 2, -1, h, w)
cls_probs = F.softmax(cls_scores, dim=1)[:, 1].squeeze(dim=1)
else:
cls_probs = torch.sigmoid(cls_scores)
return bbox_deltas, cls_scores, cls_probs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_internal': 4, 'priors_num': 4}]
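# Illustrative smoke test (not part of the original source), assuming only the
# RPN definition above and the shapes from get_inputs()/get_init_inputs():
def _rpn_smoke_test():
    rpn = RPN(dim_in=4, dim_internal=4, priors_num=4)
    bbox_deltas, cls_scores, cls_probs = rpn(torch.rand(4, 4, 4, 4))
    assert bbox_deltas.shape == (4, 16, 4, 4)   # 4 * priors_num channels
    assert cls_scores.shape == (4, 2, 4, 4, 4)  # (b, 2, priors, h, w) view
    assert cls_probs.shape == (4, 4, 4, 4)      # foreground softmax slice
    assert (cls_probs >= 0).all() and (cls_probs <= 1).all()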
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torchvision import models as models
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 64
x2 = xindex // 128
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 128 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0 + 128 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x3, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 4, 4), (128, 16, 4, 1))
buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 4, 4), (256, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(1024)](buf4, primals_7, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf5 = buf2
del buf2
triton_poi_fused_convolution_2[grid(512)](buf5, primals_5, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 2, 4, 4, 4), (128, 64, 16, 4, 1),
torch.float32)
triton_poi_fused__softmax_3[grid(512)](buf5, buf6, 512, XBLOCK=256,
num_warps=4, num_stages=1)
return buf4, reinterpret_tensor(buf5, (4, 2, 4, 4, 4), (128, 64, 16, 4,
1), 0), reinterpret_tensor(buf6, (4, 4, 4, 4), (128, 16, 4, 1), 64
), primals_1, primals_3, primals_4, primals_6, buf1, reinterpret_tensor(
buf5, (4, 2, 4, 4, 4), (128, 64, 16, 4, 1), 0)
def conv(in_channels, out_channels, kernel_size=3, padding=1, bn=True,
dilation=1, stride=1, relu=True, bias=True):
modules = [nn.Conv2d(in_channels, out_channels, kernel_size, stride,
padding, dilation, bias=bias)]
if bn:
modules.append(nn.BatchNorm2d(out_channels))
if relu:
modules.append(nn.ReLU(inplace=True))
return nn.Sequential(*modules)
class RPNNew(nn.Module):
def __init__(self, dim_in, dim_internal, priors_num,
class_activation_mode='softmax'):
super().__init__()
assert class_activation_mode in ('softmax', 'sigmoid')
self.dim_in = dim_in
self.dim_internal = dim_internal
self.priors_num = priors_num
self.dim_score = (priors_num * 2 if class_activation_mode ==
'softmax' else priors_num)
self.class_activation_mode = class_activation_mode
self.conv = nn.Conv2d(dim_in, dim_internal, 3, 1, 1)
self.cls_score = nn.Conv2d(dim_internal, self.dim_score, 1, 1, 0)
self.bbox_deltas = nn.Conv2d(dim_internal, 4 * priors_num, 1, 1, 0)
self.init_weights()
def init_weights(self):
nn.init.normal_(self.conv.weight, std=0.01)
nn.init.constant_(self.conv.bias, 0)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.constant_(self.cls_score.bias, 0)
nn.init.normal_(self.bbox_deltas.weight, std=0.01)
nn.init.constant_(self.bbox_deltas.bias, 0)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.cls_score.weight
primals_5 = self.cls_score.bias
primals_6 = self.bbox_deltas.weight
primals_7 = self.bbox_deltas.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1], output[2]
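# Illustrative cross-check (not part of the original source): RPNNew feeds the
# same parameters through the Inductor-generated call(), so with shared
# weights its outputs should match the eager RPN up to float tolerance. This
# sketch assumes the eager RPN from this record is importable alongside
# RPNNew and that CUDA is available.
def _compare_rpn_eager_vs_compiled():
    if not torch.cuda.is_available():
        return
    eager = RPN(4, 4, 4).cuda()
    compiled = RPNNew(4, 4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    for a, b in zip(eager(x), compiled(x)):
        assert torch.allclose(a, b, atol=1e-5)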
|
vadimadr/openvino_training_extensions
|
RPN
| false | 11040 |
[
"Apache-2.0"
] | 0 |
5d64b8423c8eb7b374ed629fad938359d34a07d2
|
https://github.com/vadimadr/openvino_training_extensions/tree/5d64b8423c8eb7b374ed629fad938359d34a07d2
|
CGRU_cell
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ie/ciettq2a3562jfpgfe75iig4ki2hbm6pmbwujlvp6mw26i2odufm.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %primals_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/7p/c7p47e6apjbtbvcblw3pz3tosjv2owx6kydlykydxajntpaisask.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %mul], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (128*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-4) + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.sigmoid(tmp11)
tmp13 = tl.load(in_ptr3 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp6, tmp14, tmp15)
tmp17 = tl.where(tmp4, tmp5, tmp16)
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/fh/cfhrmdu4a5b6lq2c2bpgqfbjgz7e6u2krexgs6d7izvnwrkxlozj.py
# Topologically Sorted Source Nodes: [update_gate, p1, ct, mul_1, sub, mul_2, next_h], Original ATen: [aten.sigmoid, aten.convolution, aten.tanh, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# ct => tanh
# mul_1 => mul_1
# mul_2 => mul_2
# next_h => add
# p1 => convolution_1
# sub => sub
# update_gate => sigmoid_1
# Graph fragment:
# %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_5, %primals_6, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %primals_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %tanh), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
triton_poi_fused_add_convolution_mul_rsub_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_convolution_mul_rsub_sigmoid_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_rsub_sigmoid_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_rsub_sigmoid_tanh_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 64)
x4 = xindex % 64
x1 = (xindex // 16) % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (64 + x4 + (128*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_out_ptr0 + (x3), xmask)
tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp6 = tmp4 + tmp5
tmp8 = tmp3 * tmp7
tmp9 = 1.0
tmp10 = tmp9 - tmp3
tmp11 = libdevice.tanh(tmp6)
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tl.store(out_ptr0 + (x3), tmp3, xmask)
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/am/camfvuxfjpr5x5o2xw5w7rhbwnmxbrpfzmkbgmzb4kgtfra5wf5i.py
# Topologically Sorted Source Nodes: [reset_gate], Original ATen: [aten.sigmoid, aten.sigmoid_backward]
# Source node to ATen node mapping:
# reset_gate => sigmoid
# Graph fragment:
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_3), kwargs = {})
triton_poi_fused_sigmoid_sigmoid_backward_3 = async_compile.triton('triton_poi_fused_sigmoid_sigmoid_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_sigmoid_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 64)
x3 = xindex % 64
x1 = (xindex // 16) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (128*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp6 = tmp3 * tmp5
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_4, (8, ), (1, ))
assert_size_stride(primals_5, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 512, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [c1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 4, 4), (128, 16, 4, 1))
buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_2, buf1, primals_4, primals_1, buf3, 512, grid=grid(512), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [p1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [update_gate, p1, ct, mul_1, sub, mul_2, next_h], Original ATen: [aten.sigmoid, aten.convolution, aten.tanh, aten.mul, aten.rsub, aten.add]
triton_poi_fused_add_convolution_mul_rsub_sigmoid_tanh_2.run(buf5, buf1, primals_4, primals_6, primals_1, buf2, buf6, 256, grid=grid(256), stream=stream0)
del primals_6
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [reset_gate], Original ATen: [aten.sigmoid, aten.sigmoid_backward]
triton_poi_fused_sigmoid_sigmoid_backward_3.run(buf1, primals_4, buf7, 256, grid=grid(256), stream=stream0)
del buf1
del primals_4
return (buf6, primals_1, primals_3, primals_5, buf0, buf2, buf3, buf5, buf7, )
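# Note (illustrative comment, not part of the generated output): in the tuple
# returned by call(), buf6 is next_h, the only value CGRU_cellNew returns; the
# remaining tensors, including buf7 = sigmoid(r) * (1 - sigmoid(r)) from the
# sigmoid_backward kernel, are residuals saved for the autograd backward pass.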
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as f
from math import sqrt as sqrt
from itertools import product as product
class CGRU_cell(nn.Module):
"""Initialize a basic Conv GRU cell.
Args:
filter_size: int that is the height and width of the filters
      num_features: int that is the number of channels of the states, like hidden_size
"""
def __init__(self, input_chans, filter_size, num_features):
super(CGRU_cell, self).__init__()
self.input_chans = input_chans
self.filter_size = filter_size
self.num_features = num_features
self.padding = int((filter_size - 1) / 2)
        self.ConvGates = nn.Conv2d(self.input_chans + self.num_features,
            2 * self.num_features, 3, padding=self.padding)
        self.Conv_ct = nn.Conv2d(self.input_chans + self.num_features,
            self.num_features, 3, padding=self.padding)
def forward(self, input, hidden_state):
hidden = hidden_state
c1 = self.ConvGates(torch.cat((input, hidden), 1))
rt, ut = c1.chunk(2, 1)
reset_gate = f.sigmoid(rt)
update_gate = f.sigmoid(ut)
gated_hidden = torch.mul(reset_gate, hidden)
p1 = self.Conv_ct(torch.cat((input, gated_hidden), 1))
ct = f.tanh(p1)
next_h = torch.mul(update_gate, hidden) + (1 - update_gate) * ct
return next_h
def init_hidden(self, input):
feature_size = input.size()[-2:]
return Variable(torch.zeros(input.size(0), self.num_features,
feature_size[0], feature_size[1]))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_chans': 4, 'filter_size': 4, 'num_features': 4}]
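# Illustrative smoke test (not part of the original source), assuming only the
# definitions above. Note the cell hard-codes 3x3 convolutions, so filter_size
# only enters through the padding computation.
def _cgru_smoke_test():
    cell = CGRU_cell(input_chans=4, filter_size=3, num_features=4)
    x = torch.rand(4, 4, 4, 4)
    h = cell.init_hidden(x)
    next_h = cell(x, h)
    assert next_h.shape == (4, 4, 4, 4)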
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.autograd import Variable
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 128 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-4 + x1), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.sigmoid(tmp11)
tmp13 = tl.load(in_ptr3 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp6, tmp14, tmp15)
tmp17 = tl.where(tmp4, tmp5, tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_convolution_mul_rsub_sigmoid_tanh_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 64
x4 = xindex % 64
x1 = xindex // 16 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (64 + x4 + 128 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_out_ptr0 + x3, xmask)
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp6 = tmp4 + tmp5
tmp8 = tmp3 * tmp7
tmp9 = 1.0
tmp10 = tmp9 - tmp3
tmp11 = libdevice.tanh(tmp6)
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tl.store(out_ptr0 + x3, tmp3, xmask)
tl.store(in_out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp13, xmask)
@triton.jit
def triton_poi_fused_sigmoid_sigmoid_backward_3(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 64
x3 = xindex % 64
x1 = xindex // 16 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 128 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp6 = tmp3 * tmp5
tl.store(out_ptr0 + x4, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (8, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_4, (8,), (1,))
assert_size_stride(primals_5, (4, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_2, primals_1, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 4, 4), (128, 16, 4, 1))
buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
triton_poi_fused_cat_1[grid(512)](primals_2, buf1, primals_4,
primals_1, buf3, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf4 = extern_kernels.convolution(buf3, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_mul_rsub_sigmoid_tanh_2[grid(256)](
buf5, buf1, primals_4, primals_6, primals_1, buf2, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_sigmoid_sigmoid_backward_3[grid(256)](buf1,
primals_4, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_4
return buf6, primals_1, primals_3, primals_5, buf0, buf2, buf3, buf5, buf7
class CGRU_cellNew(nn.Module):
"""Initialize a basic Conv GRU cell.
Args:
filter_size: int that is the height and width of the filters
      num_features: int that is the number of channels of the states, like hidden_size
"""
def __init__(self, input_chans, filter_size, num_features):
super(CGRU_cellNew, self).__init__()
self.input_chans = input_chans
self.filter_size = filter_size
self.num_features = num_features
self.padding = int((filter_size - 1) / 2)
        self.ConvGates = nn.Conv2d(self.input_chans + self.num_features,
            2 * self.num_features, 3, padding=self.padding)
        self.Conv_ct = nn.Conv2d(self.input_chans + self.num_features,
            self.num_features, 3, padding=self.padding)
def init_hidden(self, input):
feature_size = input.size()[-2:]
return Variable(torch.zeros(input.size(0), self.num_features,
feature_size[0], feature_size[1]))
def forward(self, input_0, input_1):
primals_3 = self.ConvGates.weight
primals_4 = self.ConvGates.bias
primals_5 = self.Conv_ct.weight
primals_6 = self.Conv_ct.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
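# Illustrative cross-check (not part of the original source), assuming the
# eager CGRU_cell from this record is importable alongside CGRU_cellNew and
# that CUDA is available. In the traced graph input_0 binds to primals_1,
# which plays the hidden-state role, so the compiled cell takes (hidden,
# input) relative to the eager cell's (input, hidden).
def _compare_cgru_eager_vs_compiled():
    if not torch.cuda.is_available():
        return
    eager = CGRU_cell(4, 3, 4).cuda()
    compiled = CGRU_cellNew(4, 3, 4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    h = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x, h), compiled(h, x), atol=1e-5)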
|
zhujiagang/realtime-refined-random
|
CGRU_cell
| false | 11041 |
[
"MIT"
] | 0 |
3aa8169049ab8be8b1ea5a78bbe9b89ac6c15593
|
https://github.com/zhujiagang/realtime-refined-random/tree/3aa8169049ab8be8b1ea5a78bbe9b89ac6c15593
|
ResBlockWithFusedBN
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out => convolution
# out_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/l3/cl3kktfjbfxvoqsgvjon5fk5ycnqjp7n2a3dk3gk4gw4n2jfe25m.py
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_4 => convolution_2
# out_5 => add
# out_6 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_2, %primals_1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
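# Note (illustrative comment, not part of the generated output): the kernel
# above fuses the conv3 bias add, the residual add, and the final ReLU, and
# also writes the boolean (out <= 0) mask that aten.threshold_backward uses
# for the ReLU gradient.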
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_threshold_backward_1.run(buf5, primals_7, primals_1, buf6, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf5, primals_1, primals_2, primals_4, primals_6, buf1, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
from torchvision import models as models
import torch.onnx
class ResBlockWithFusedBN(nn.Module):
""" Bottleneck Residual Block """
    def __init__(self, inplanes, outplanes, innerplanes, stride=1,
            dilation=1, group=1, stride_1x1=True):
        super().__init__()
        str1x1, str3x3 = (stride, 1) if stride_1x1 else (1, stride)
        self.conv1 = nn.Conv2d(inplanes, innerplanes, kernel_size=1,
            stride=str1x1, bias=True)
        self.conv2 = nn.Conv2d(innerplanes, innerplanes, kernel_size=3,
            stride=str3x3, bias=True, padding=1 * dilation,
            dilation=dilation, groups=group)
self.conv3 = nn.Conv2d(innerplanes, outplanes, kernel_size=1,
stride=1, bias=True)
self.downsample = None
if stride != 1 or inplanes != outplanes:
self.downsample = nn.Conv2d(inplanes, outplanes, kernel_size=1,
stride=stride, bias=True)
self.relu = nn.ReLU(inplace=True)
self._init_weights()
def _init_weights(self):
for submodule in self.modules():
if isinstance(submodule, nn.Conv2d):
nn.init.kaiming_uniform_(submodule.weight)
if submodule.bias is not None:
nn.init.constant_(submodule.bias, 0)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.relu(out)
out = self.conv3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'outplanes': 4, 'innerplanes': 4}]
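# Illustrative smoke test (not part of the original source): with
# inplanes == outplanes and stride == 1 no downsample branch is created, so
# the identity is added directly before the final ReLU.
def _resblock_smoke_test():
    block = ResBlockWithFusedBN(inplanes=4, outplanes=4, innerplanes=4)
    assert block.downsample is None
    x = torch.rand(4, 4, 4, 4)
    out = block(x)
    assert out.shape == x.shape
    assert (out >= 0).all()  # final ReLU clamps at zero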
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torchvision import models as models
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf5, primals_7, primals_1, buf6, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_7
return buf5, primals_1, primals_2, primals_4, primals_6, buf1, buf3, buf6
class ResBlockWithFusedBNNew(nn.Module):
""" Bottleneck Residual Block """
    def __init__(self, inplanes, outplanes, innerplanes, stride=1,
            dilation=1, group=1, stride_1x1=True):
        super().__init__()
        str1x1, str3x3 = (stride, 1) if stride_1x1 else (1, stride)
        self.conv1 = nn.Conv2d(inplanes, innerplanes, kernel_size=1,
            stride=str1x1, bias=True)
        self.conv2 = nn.Conv2d(innerplanes, innerplanes, kernel_size=3,
            stride=str3x3, bias=True, padding=1 * dilation,
            dilation=dilation, groups=group)
self.conv3 = nn.Conv2d(innerplanes, outplanes, kernel_size=1,
stride=1, bias=True)
self.downsample = None
if stride != 1 or inplanes != outplanes:
self.downsample = nn.Conv2d(inplanes, outplanes, kernel_size=1,
stride=stride, bias=True)
self.relu = nn.ReLU(inplace=True)
self._init_weights()
def _init_weights(self):
for submodule in self.modules():
if isinstance(submodule, nn.Conv2d):
nn.init.kaiming_uniform_(submodule.weight)
if submodule.bias is not None:
nn.init.constant_(submodule.bias, 0)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
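# Illustrative cross-check (not part of the original source), assuming the
# eager ResBlockWithFusedBN from this record is importable alongside the
# compiled variant and that CUDA is available.
def _compare_resblock_eager_vs_compiled():
    if not torch.cuda.is_available():
        return
    eager = ResBlockWithFusedBN(4, 4, 4).cuda()
    compiled = ResBlockWithFusedBNNew(4, 4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)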
|
vadimadr/openvino_training_extensions
|
ResBlockWithFusedBN
| false | 11042 |
[
"Apache-2.0"
] | 0 |
5d64b8423c8eb7b374ed629fad938359d34a07d2
|
https://github.com/vadimadr/openvino_training_extensions/tree/5d64b8423c8eb7b374ed629fad938359d34a07d2
|
BertSelfOutput
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/uv/cuv5ywiflh7n5wdgqtkufif4yho6cbzwcjdxkdjubib3zte4a6ke.py
# Topologically Sorted Source Nodes: [hidden_states_2, u], Original ATen: [aten.add, aten.mean]
# Source node to ATen node mapping:
# hidden_states_2 => add
# u => mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
triton_poi_fused_add_mean_0 = async_compile.triton('triton_poi_fused_add_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/my/cmysbppbwfknwvg2gi2jiqqs7l3hosm7k7nbz7vf3rtnd4p6gznh.py
# Topologically Sorted Source Nodes: [hidden_states_2, u, sub], Original ATen: [aten.add, aten.mean, aten.sub]
# Source node to ATen node mapping:
# hidden_states_2 => add
# sub => sub
# u => mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
triton_poi_fused_add_mean_sub_1 = async_compile.triton('triton_poi_fused_add_mean_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/4p/c4pnuv3rymhg72qutbvx7mkzv6t7edcefa73bt3nl66b4qtouu4a.py
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_3], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# hidden_states_3 => add_2
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sqrt => sqrt
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_5), kwargs = {})
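# Spelled out, this fragment is the tail of the TF-style LayerNorm defined
# below:
#     out = weight * (x - u) / sqrt(mean((x - u)^2) + eps) + bias
# with eps = 1 in this trace (hence the literal 1.0 inside the kernel).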
triton_poi_fused_add_div_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_2, u], Original ATen: [aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_0.run(buf0, primals_3, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [hidden_states_2, u, sub], Original ATen: [aten.add, aten.mean, aten.sub]
triton_poi_fused_add_mean_sub_1.run(buf2, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del buf1
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_3], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_pow_sqrt_2.run(primals_4, buf2, primals_5, buf3, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, primals_4, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.onnx
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size,
            bias=False)
        self.LayerNorm = BertLayerNorm(config.hidden_size,
            eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1,
hidden_dropout_prob=0.5)}]
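# Illustrative usage sketch (added for clarity; not part of the original
# record). It wires get_init_inputs()/get_inputs() into the module the way
# the benchmark harness does; eval() turns the p=0.5 dropout into a no-op so
# the output is deterministic.
if __name__ == "__main__":
    _, init_kwargs = get_init_inputs()
    module = BertSelfOutput(**init_kwargs).eval()
    hidden, residual = get_inputs()
    out = module(hidden, residual)
    assert out.shape == hidden.shape  # residual add + LayerNorm keep the shape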
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_0[grid(64)](buf0, primals_3, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_mean_sub_1[grid(256)](buf2, primals_3, buf1,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_4,
buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_4, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf2
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfOutputNew(nn.Module):
def __init__(self, config):
super(BertSelfOutputNew, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size,
            bias=False)
        self.LayerNorm = BertLayerNorm(config.hidden_size,
            eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_4 = self.LayerNorm.weight
primals_5 = self.LayerNorm.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
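# Illustrative parity sketch (added; assumes a CUDA device, since call()
# launches the Triton kernels above). The traced graph contains no dropout
# node, so the compiled forward should match an eager residual-add followed
# by the module's own LayerNorm:
if __name__ == "__main__" and torch.cuda.is_available():
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=4, layer_norm_eps=1,
        hidden_dropout_prob=0.5)
    m = BertSelfOutputNew(cfg).cuda()
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(m(a, b), m.LayerNorm(m.dense(a) + b))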
|
xerothermic/examples
|
BertSelfOutput
| false | 11,043 |
[
"MIT"
] | 0 |
d9d0ad02bac27fc483079d27c86b54145e45f81b
|
https://github.com/xerothermic/examples/tree/d9d0ad02bac27fc483079d27c86b54145e45f81b
|
CenterLoss
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py
# Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize_1 => div_1
# Graph fragment:
# %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %expand_1), kwargs = {})
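# i.e. row-wise clamped L2 normalization of the centers,
#     x / max(||x||_2, 1e-12),
# matching F.normalize(self.centers.data, p=2, dim=1) in the source module.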
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/a3/ca3qbp72xas42qtlvd3t3ohkaapjb5gccrl7lrqrojotoqljrnsl.py
# Topologically Sorted Source Nodes: [features], Original ATen: [aten.div]
# Source node to ATen node mapping:
# features => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
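# the same clamped L2 normalization, here taken over dim=1 of the 4D feature
# map, i.e. F.normalize(features) in CenterLoss.forward below.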
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ar/cars53vh3rv5yitxlgn2bxyfibjpz4h4zdwjgkirdp2gn7fvquwp.py
# Topologically Sorted Source Nodes: [cosine_similarity, cos_diff, sum_1, center_loss], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul, aten.sum, aten.rsub]
# Source node to ATen node mapping:
# center_loss => div_4
# cos_diff => sub
# cosine_similarity => clamp_min_2, clamp_min_3, div_2, div_3, mul, pow_5, pow_6, pow_7, pow_8, sum_3, sum_4, sum_5
# sum_1 => sum_6
# Graph fragment:
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%expand_3, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%expand_2, 2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [1], True), kwargs = {})
# %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_6, 1e-08), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_8, 1e-08), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%expand_2, %clamp_min_3), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%expand_3, %clamp_min_2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, %div_2), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sum_5), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub,), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, 4), kwargs = {})
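# Net effect of this fused reduction:
#     center_loss = sum(1 - cosine_similarity(features, centers[labels])) / 4
# i.e. the cos_dist=True branch of CenterLoss.forward with batch_size = 4.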
triton_per_fused_clamp_min_div_linalg_vector_norm_mul_rsub_sum_2 = async_compile.triton('triton_per_fused_clamp_min_div_linalg_vector_norm_mul_rsub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_min_div_linalg_vector_norm_mul_rsub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_min_div_linalg_vector_norm_mul_rsub_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = (rindex // 16)
r4 = rindex % 16
r1 = (rindex // 4) % 4
r0 = rindex % 4
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r4 + (64*r2)), None)
tmp2 = tl.load(in_ptr0 + (16 + r4 + (64*r2)), None)
tmp5 = tl.load(in_ptr0 + (32 + r4 + (64*r2)), None)
tmp8 = tl.load(in_ptr0 + (48 + r4 + (64*r2)), None)
tmp15 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 1e-08
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp0 / tmp13
tmp16 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp17 = tmp15 + tmp16
tmp18 = tmp15 < 0
tmp19 = tl.where(tmp18, tmp17, tmp15)
tl.device_assert((0 <= tmp19) & (tmp19 < 4), "index out of bounds: 0 <= tmp19 < 4")
tmp21 = tl.load(in_ptr2 + (r0 + (4*tmp19)), None)
tmp22 = tmp21 * tmp21
tmp23 = tmp22 + tmp22
tmp24 = tmp23 + tmp22
tmp25 = tmp24 + tmp22
tmp26 = libdevice.sqrt(tmp25)
tmp27 = triton_helpers.maximum(tmp26, tmp12)
tmp28 = tmp21 / tmp27
tmp29 = tmp14 * tmp28
tmp30 = tmp2 / tmp13
tmp31 = tmp30 * tmp28
tmp32 = tmp29 + tmp31
tmp33 = tmp5 / tmp13
tmp34 = tmp33 * tmp28
tmp35 = tmp32 + tmp34
tmp36 = tmp8 / tmp13
tmp37 = tmp36 * tmp28
tmp38 = tmp35 + tmp37
tmp39 = 1.0
tmp40 = tmp39 - tmp38
tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp43 = tl.sum(tmp41, 1)[:, None]
tmp44 = 0.25
tmp45 = tmp43 * tmp44
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp45, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_3, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [features], Original ATen: [aten.div]
triton_poi_fused_div_1.run(primals_1, buf1, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((), (), torch.float32)
buf18 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [cosine_similarity, cos_diff, sum_1, center_loss], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul, aten.sum, aten.rsub]
triton_per_fused_clamp_min_div_linalg_vector_norm_mul_rsub_sum_2.run(buf18, buf1, primals_2, buf0, 1, 64, grid=grid(1), stream=stream0)
del buf1
# Topologically Sorted Source Nodes: [], Original ATen: []
buf4 = torch.ops.aten.set_.source_Tensor(primals_3, buf0)
assert_size_stride(buf4, (4, 4), (4, 1))
del primals_3
return (buf18, primals_1, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import models as models
import torch.onnx
class CenterLoss(nn.Module):
"""Implements the Center loss from https://ydwen.github.io/papers/WenECCV16.pdf"""
def __init__(self, num_classes, embed_size, cos_dist=True):
super().__init__()
self.cos_dist = cos_dist
self.num_classes = num_classes
self.centers = nn.Parameter(torch.randn(self.num_classes, embed_size))
self.embed_size = embed_size
self.mse = nn.MSELoss(reduction='elementwise_mean')
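        # note: 'elementwise_mean' is the pre-1.0 spelling of reduction='mean';
        # recent PyTorch versions still accept it but emit a deprecation warning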
def get_centers(self):
"""Returns estimated centers"""
return self.centers
def forward(self, features, labels):
features = F.normalize(features)
batch_size = labels.size(0)
features_dim = features.size(1)
assert features_dim == self.embed_size
if self.cos_dist:
self.centers.data = F.normalize(self.centers.data, p=2, dim=1)
centers_batch = self.centers[labels, :]
if self.cos_dist:
cos_sim = nn.CosineSimilarity()
cos_diff = 1.0 - cos_sim(features, centers_batch)
center_loss = torch.sum(cos_diff) / batch_size
else:
center_loss = self.mse(centers_batch, features)
return center_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'num_classes': 4, 'embed_size': 4}]
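# Illustrative usage sketch (added for clarity; not from the source repo).
# Runs the eager CenterLoss on the sample inputs; the result is a scalar
# cosine-distance center loss.
if __name__ == "__main__":
    loss_fn = CenterLoss(num_classes=4, embed_size=4)
    features, labels = get_inputs()
    print(float(loss_fn(features, labels)))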
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torchvision import models as models
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused_clamp_min_div_linalg_vector_norm_mul_rsub_sum_2(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
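    # the unassigned expressions in this prologue appear to be leftover index
    # bookkeeping from the Inductor template; their results are never used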
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex // 16
r4 = rindex % 16
r1 = rindex // 4 % 4
r0 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r4 + 64 * r2), None)
tmp2 = tl.load(in_ptr0 + (16 + r4 + 64 * r2), None)
tmp5 = tl.load(in_ptr0 + (32 + r4 + 64 * r2), None)
tmp8 = tl.load(in_ptr0 + (48 + r4 + 64 * r2), None)
tmp15 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 1e-08
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp0 / tmp13
tmp16 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp17 = tmp15 + tmp16
tmp18 = tmp15 < 0
tmp19 = tl.where(tmp18, tmp17, tmp15)
tl.device_assert((0 <= tmp19) & (tmp19 < 4),
'index out of bounds: 0 <= tmp19 < 4')
tmp21 = tl.load(in_ptr2 + (r0 + 4 * tmp19), None)
tmp22 = tmp21 * tmp21
tmp23 = tmp22 + tmp22
tmp24 = tmp23 + tmp22
tmp25 = tmp24 + tmp22
tmp26 = libdevice.sqrt(tmp25)
tmp27 = triton_helpers.maximum(tmp26, tmp12)
tmp28 = tmp21 / tmp27
tmp29 = tmp14 * tmp28
tmp30 = tmp2 / tmp13
tmp31 = tmp30 * tmp28
tmp32 = tmp29 + tmp31
tmp33 = tmp5 / tmp13
tmp34 = tmp33 * tmp28
tmp35 = tmp32 + tmp34
tmp36 = tmp8 / tmp13
tmp37 = tmp36 * tmp28
tmp38 = tmp35 + tmp37
tmp39 = 1.0
tmp40 = tmp39 - tmp38
tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp43 = tl.sum(tmp41, 1)[:, None]
tmp44 = 0.25
tmp45 = tmp43 * tmp44
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp45, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_3, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_1[grid(256)](primals_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((), (), torch.float32)
buf18 = buf3
del buf3
triton_per_fused_clamp_min_div_linalg_vector_norm_mul_rsub_sum_2[grid
(1)](buf18, buf1, primals_2, buf0, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del buf1
buf4 = torch.ops.aten.set_.source_Tensor(primals_3, buf0)
assert_size_stride(buf4, (4, 4), (4, 1))
del primals_3
return buf18, primals_1, primals_2, buf0
class CenterLossNew(nn.Module):
"""Implements the Center loss from https://ydwen.github.io/papers/WenECCV16.pdf"""
def __init__(self, num_classes, embed_size, cos_dist=True):
super().__init__()
self.cos_dist = cos_dist
self.num_classes = num_classes
self.centers = nn.Parameter(torch.randn(self.num_classes, embed_size))
self.embed_size = embed_size
self.mse = nn.MSELoss(reduction='elementwise_mean')
def get_centers(self):
"""Returns estimated centers"""
return self.centers
def forward(self, input_0, input_1):
primals_3 = self.centers
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
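# Illustrative smoke test (added; needs a CUDA device, since call() launches
# the Triton kernels above). Note that call() also writes the re-normalized
# centers back into the parameter via aten.set_.
if __name__ == "__main__" and torch.cuda.is_available():
    loss_fn = CenterLossNew(num_classes=4, embed_size=4).cuda()
    features = torch.rand(4, 4, 4, 4, device='cuda')
    labels = torch.ones(4, dtype=torch.int64, device='cuda')
    print(float(loss_fn(features, labels)))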
|
vadimadr/openvino_training_extensions
|
CenterLoss
| false | 11,044 |
[
"Apache-2.0"
] | 0 |
5d64b8423c8eb7b374ed629fad938359d34a07d2
|
https://github.com/vadimadr/openvino_training_extensions/tree/5d64b8423c8eb7b374ed629fad938359d34a07d2
|
spatial_attn_layer
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/uc/cucdaa5tqnxykdmw5yqh7ir5ac35phopjcobljrg4rrtlnfjtuwd.py
# Topologically Sorted Source Nodes: [x_compress], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_compress => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], 1), kwargs = {})
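# i.e. x_compress stacks the channel-wise max and the channel-wise mean of x
# into a 2-channel map, which is what ChannelPool.forward computes below.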
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = 4.0
tmp25 = tmp23 / tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp14, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp13, tmp27)
tl.store(out_ptr0 + (x3), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/6q/c6qyrmvchep2lyeodxjgze7brt2fv4khvsx2os2smplvfajckxaz.py
# Topologically Sorted Source Nodes: [scale, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# scale => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
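# i.e. the spatial gate out = x * sigmoid(conv(x_compress)), with the single
# attention map broadcast across all four input channels.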
triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 5, 5), (50, 25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_compress], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scale, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_1.run(primals_1, buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_1, primals_2, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 2, 5, 5), (50, 25, 5, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, dilation=1, groups=1, relu=True, bn=False, bias=False):
super(BasicConv, self).__init__()
self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-05, momentum=0.01,
affine=True) if bn else None
self.relu = nn.ReLU() if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class ChannelPool(nn.Module):
def forward(self, x):
        return torch.cat((torch.max(x, 1)[0].unsqueeze(1),
            torch.mean(x, 1).unsqueeze(1)), dim=1)
class spatial_attn_layer(nn.Module):
def __init__(self, kernel_size=5):
super(spatial_attn_layer, self).__init__()
self.compress = ChannelPool()
        self.spatial = BasicConv(2, 1, kernel_size, stride=1,
            padding=(kernel_size - 1) // 2, relu=False)
def forward(self, x):
x_compress = self.compress(x)
x_out = self.spatial(x_compress)
scale = torch.sigmoid(x_out)
return x * scale
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
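# Illustrative usage sketch (added for clarity; not from the source repo):
if __name__ == "__main__":
    layer = spatial_attn_layer(kernel_size=5)
    x = get_inputs()[0]
    y = layer(x)
    assert y.shape == x.shape  # the sigmoid gate preserves the input shape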
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp17 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = 4.0
tmp25 = tmp23 / tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp14, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp13, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 5, 5), (50, 25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](primals_1, buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_2, buf0, buf1
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, dilation=1, groups=1, relu=True, bn=False, bias=False):
super(BasicConv, self).__init__()
self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-05, momentum=0.01,
affine=True) if bn else None
self.relu = nn.ReLU() if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class ChannelPool(nn.Module):
def forward(self, x):
        return torch.cat((torch.max(x, 1)[0].unsqueeze(1),
            torch.mean(x, 1).unsqueeze(1)), dim=1)
class spatial_attn_layerNew(nn.Module):
def __init__(self, kernel_size=5):
super(spatial_attn_layerNew, self).__init__()
self.compress = ChannelPool()
        self.spatial = BasicConv(2, 1, kernel_size, stride=1,
            padding=(kernel_size - 1) // 2, relu=False)
def forward(self, input_0):
primals_2 = self.spatial.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
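# Illustrative smoke test (added; needs a CUDA device for the Triton kernels):
if __name__ == "__main__" and torch.cuda.is_available():
    layer = spatial_attn_layerNew().cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert layer(x).shape == x.shape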
|
zhanzhibingshang/deblurganv2_mirnet
|
spatial_attn_layer
| false | 11,045 |
[
"BSD-3-Clause"
] | 0 |
12fcc94ee0ff33335c557cf46a776a13cae3804b
|
https://github.com/zhanzhibingshang/deblurganv2_mirnet/tree/12fcc94ee0ff33335c557cf46a776a13cae3804b
|
FocalDiceLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ns/cns2nrpncy33g5kjstj2ayuyxdnuiztaaq6qz65a3usbmalxtaf4.py
# Topologically Sorted Source Nodes: [mul, intersect, mul_3, add, mul_2, z_sum, mul_1, y_sum, add_1, add_2, dice, pow_1, loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.pow, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# dice => div
# intersect => sum_1
# loss => sub
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# y_sum => sum_2
# z_sum => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1e-06), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg1_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, %sum_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %pow_1), kwargs = {})
triton_per_fused_add_div_mul_pow_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_pow_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_pow_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_pow_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tmp0 * tmp0
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp1 * tmp1
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 2.0
tmp15 = tmp5 * tmp14
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = tmp9 + tmp13
tmp19 = tmp18 + tmp16
tmp20 = tmp17 / tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = 1.0
tmp23 = tmp22 - tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersect, mul_3, add, mul_2, z_sum, mul_1, y_sum, add_1, add_2, dice, pow_1, loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.pow, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_pow_rsub_sum_0.run(buf3, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class FocalDiceLoss(nn.Module):
def __init__(self, gamma=2.0):
super().__init__()
self.gamma = gamma
def forward(self, score, target):
target = target.float()
smooth = 1e-06
intersect = torch.sum(score * target)
y_sum = torch.sum(target * target)
z_sum = torch.sum(score * score)
dice = (2 * intersect + smooth) / (z_sum + y_sum + smooth)
loss = 1 - dice ** (1.0 / self.gamma)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
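# --- Editor's sanity check (not part of the original dump): with the default
# gamma=2.0 the loss is exactly 1 - sqrt(dice), which is what the fused kernel
# above computes via libdevice.sqrt. Runs on CPU; values are arbitrary.
import torch
_score, _target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
_loss = FocalDiceLoss(gamma=2.0)(_score, _target)
_smooth = 1e-06
_dice = (2 * (_score * _target).sum() + _smooth) / ((_score * _score).sum()
    + (_target * _target).sum() + _smooth)
assert torch.allclose(_loss, 1 - _dice.sqrt())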
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_pow_rsub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tmp0 * tmp0
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp1 * tmp1
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 2.0
tmp15 = tmp5 * tmp14
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = tmp9 + tmp13
tmp19 = tmp18 + tmp16
tmp20 = tmp17 / tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = 1.0
tmp23 = tmp22 - tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_pow_rsub_sum_0[grid(1)](buf3, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class FocalDiceLossNew(nn.Module):
def __init__(self, gamma=2.0):
super().__init__()
self.gamma = gamma
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
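# --- Editor's equivalence sketch (assumption, not part of the original dump):
# the dice expression is symmetric in score and target, so the Triton-reduced
# FocalDiceLossNew should match the eager FocalDiceLoss above up to
# floating-point reduction order. Assumes both classes are in scope and a
# CUDA device is available.
import torch
def _check_focal_dice():
    if not torch.cuda.is_available():
        return
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(FocalDiceLoss()(a, b), FocalDiceLossNew()(a, b),
        atol=1e-5)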
|
xuyangcao/AttD2UNet
|
FocalDiceLoss
| false | 11,046 |
[
"MIT"
] | 0 |
b76ed8104a4183140b3cbd7f9671ca99d36e3b3e
|
https://github.com/xuyangcao/AttD2UNet/tree/b76ed8104a4183140b3cbd7f9671ca99d36e3b3e
|
FocalTiLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/k6/ck6m3kma4wwlohmp7l6n4fymg3yneitepb4xhrucrkrlqmyzmoqa.py
# Topologically Sorted Source Nodes: [intersection, add, p_, inter_alpha, mul, add_1, g_, inter_beta, mul_1, add_2, add_3, ti, sub_2, pow_1, loss], Original ATen: [aten.dot, aten.add, aten.rsub, aten.mul, aten.div, aten.pow, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# g_ => sub_1
# inter_alpha => mul_1, sum_2
# inter_beta => mul_2, sum_3
# intersection => mul, sum_1
# loss => mean
# mul => mul_3
# mul_1 => mul_4
# p_ => sub
# pow_1 => pow_1
# sub_2 => sub_2
# ti => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %view_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 0.7), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %mul_3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %view), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 0.4), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_4), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_3), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 0.75), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_add_div_dot_mean_mul_pow_rsub_0 = async_compile.triton('triton_per_fused_add_div_dot_mean_mul_pow_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_dot_mean_mul_pow_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_dot_mean_mul_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 1.0
tmp7 = tmp6 - tmp0
tmp8 = tmp7 * tmp1
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tmp6 - tmp1
tmp13 = tmp12 * tmp0
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 1e-06
tmp18 = tmp5 + tmp17
tmp19 = 0.7
tmp20 = tmp11 * tmp19
tmp21 = tmp5 + tmp20
tmp22 = 0.4
tmp23 = tmp16 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 + tmp17
tmp26 = tmp18 / tmp25
tmp27 = tmp6 - tmp26
tmp28 = 0.75
tmp29 = libdevice.pow(tmp27, tmp28)
tmp30 = tmp29 / tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp30, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [intersection, add, p_, inter_alpha, mul, add_1, g_, inter_beta, mul_1, add_2, add_3, ti, sub_2, pow_1, loss], Original ATen: [aten.dot, aten.add, aten.rsub, aten.mul, aten.div, aten.pow, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_dot_mean_mul_pow_rsub_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class FocalTiLoss(nn.Module):
def __init__(self, alpha=0.7, beta=0.4, gamma=0.75):
super().__init__()
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.eps = 1e-06
def forward(self, output, target):
output = output.float()
target = target.float()
pi = output.contiguous().view(-1)
gi = target.contiguous().view(-1)
p_ = 1 - pi
g_ = 1 - gi
intersection = torch.dot(pi, gi)
inter_alpha = torch.dot(p_, gi)
inter_beta = torch.dot(g_, pi)
ti = (intersection + self.eps) / (intersection + self.alpha *
inter_alpha + self.beta * inter_beta + self.eps)
loss = torch.mean(torch.pow(1 - ti, self.gamma))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
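# --- Editor's sanity check (not part of the original dump): with
# alpha = beta = 0.5 and gamma = 1 the Tversky index collapses to soft Dice,
# since 0.5*((1-p)*g + (1-g)*p) = 0.5*(sum(p) + sum(g)) - p*g. CPU only.
import torch
_p, _g = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
_loss = FocalTiLoss(alpha=0.5, beta=0.5, gamma=1.0)(_p, _g)
_eps = 1e-06
_dice = ((_p * _g).sum() + _eps) / (0.5 * (_p.sum() + _g.sum()) + _eps)
assert torch.allclose(_loss, 1 - _dice)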
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_dot_mean_mul_pow_rsub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 1.0
tmp7 = tmp6 - tmp0
tmp8 = tmp7 * tmp1
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tmp6 - tmp1
tmp13 = tmp12 * tmp0
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 1e-06
tmp18 = tmp5 + tmp17
tmp19 = 0.7
tmp20 = tmp11 * tmp19
tmp21 = tmp5 + tmp20
tmp22 = 0.4
tmp23 = tmp16 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 + tmp17
tmp26 = tmp18 / tmp25
tmp27 = tmp6 - tmp26
tmp28 = 0.75
tmp29 = libdevice.pow(tmp27, tmp28)
tmp30 = tmp29 / tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp30, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_dot_mean_mul_pow_rsub_0[grid(1)](buf3,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class FocalTiLossNew(nn.Module):
def __init__(self, alpha=0.7, beta=0.4, gamma=0.75):
super().__init__()
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.eps = 1e-06
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
xuyangcao/AttD2UNet
|
FocalTiLoss
| false | 11,047 |
[
"MIT"
] | 0 |
b76ed8104a4183140b3cbd7f9671ca99d36e3b3e
|
https://github.com/xuyangcao/AttD2UNet/tree/b76ed8104a4183140b3cbd7f9671ca99d36e3b3e
|
attention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# output => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [4, 4], [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/eb/cebpbupczy3a7z6yffgxybumq5trdt3jp5hxwuoo6w6cunzz7d7h.py
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# output_3 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xv/cxvgsfj3x2o5ls6evsy4rhywutbtjkwezlavric3plphgvn75mea.py
# Topologically Sorted Source Nodes: [output_7], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# output_7 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view_1), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2, ), (1, ))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1)
del primals_2
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_3, 8, grid=grid(8), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_7], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(primals_1, buf3, buf4, 256, grid=grid(256), stream=stream0)
return (buf4, primals_1, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), buf2, buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.nn.functional as F
class attention(nn.Module):
def __init__(self, input_channels, map_size):
super(attention, self).__init__()
self.pool = nn.AvgPool2d(kernel_size=map_size)
self.fc1 = nn.Linear(in_features=input_channels, out_features=
input_channels // 2)
self.fc2 = nn.Linear(in_features=input_channels // 2, out_features=
input_channels)
def forward(self, x):
output = self.pool(x)
output = output.view(output.size()[0], output.size()[1])
output = self.fc1(output)
output = F.relu(output)
output = self.fc2(output)
output = F.sigmoid(output)
output = output.view(output.size()[0], output.size()[1], 1, 1)
output = torch.mul(x, output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'map_size': 4}]
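# --- Editor's walkthrough (not part of the original dump): this module is a
# squeeze-and-excitation style channel gate: global average pool to (N, C), a
# bottleneck MLP, sigmoid, then a per-channel rescale of the input. The manual
# recomputation below mirrors forward() step by step.
import torch
_m = attention(input_channels=4, map_size=4)
_x = torch.rand(4, 4, 4, 4)
_y = _m(_x)
_w = torch.sigmoid(_m.fc2(torch.relu(_m.fc1(_m.pool(_x).flatten(1)))))
assert _y.shape == _x.shape
assert torch.allclose(_y, _x * _w.view(4, 4, 1, 1))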
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x0, tmp32, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK
=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1)
del primals_2
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(8)](buf2, primals_3, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(2, 4), (1, 2), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_2[grid(256)](primals_1, buf3, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf4, primals_1, reinterpret_tensor(buf0, (4, 4), (4, 1), 0
), buf2, buf3, primals_4
class attentionNew(nn.Module):
def __init__(self, input_channels, map_size):
super(attentionNew, self).__init__()
self.pool = nn.AvgPool2d(kernel_size=map_size)
self.fc1 = nn.Linear(in_features=input_channels, out_features=
input_channels // 2)
self.fc2 = nn.Linear(in_features=input_channels // 2, out_features=
input_channels)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
yaowenlong/clique
|
attention
| false | 11,048 |
[
"MIT"
] | 0 |
a9814ef643f7dac6080cebf76ab804d942c9cd8e
|
https://github.com/yaowenlong/clique/tree/a9814ef643f7dac6080cebf76ab804d942c9cd8e
|
Attention
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# dot_products => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/eh/ceheq5ns3kg3p6tebb47gdy475c5v4keklf245jl4fgbpvugznm5.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1)
del arg1_1
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4)
del arg2_1
del buf3
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data.distributed
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
INF = 1e10  # editor's assumption: this constant is defined elsewhere in the original repo
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
def forward(self, query, key, value):
dot_products = matmul(query, key.transpose(1, 2))
if query.dim() == 3 and (self is None or self.causal):
tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
if key.is_cuda:
                tri = tri.cuda()
dot_products.data.sub_(tri.unsqueeze(0))
return matmul(self.dropout(F.softmax(dot_products / self.scale, dim
=-1)), value)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_key': 4, 'drop_ratio': 0.5, 'causal': 4}]
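# --- Editor's sanity check (not part of the original dump): for 4-d inputs
# the causal branch is skipped (it only fires for 3-d queries), and in eval
# mode the dropout is the identity, so forward() is plain scaled dot-product
# attention. Note that key.transpose(1, 2) swaps dims 1 and 2 of the 4-d
# tensor, not the last two.
import math
import torch
import torch.nn.functional as F
_attn = Attention(d_key=4, drop_ratio=0.5, causal=False).eval()
_q, _k, _v = (torch.rand(4, 4, 4, 4) for _ in range(3))
_ref = F.softmax(_q @ _k.transpose(1, 2) / math.sqrt(4), dim=-1) @ _v
assert torch.allclose(_attn(_q, _k, _v), _ref)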
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out
=buf1)
del arg1_1
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class AttentionNew(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
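# --- Editor's note (assumption, not part of the original dump): the
# '0_inference' graph is specialized to the exact (4, 4, 4, 4) contiguous
# layout it was traced with; the assert_size_stride guards in call() reject
# anything else rather than recompiling. CUDA is assumed available.
import torch
def _shape_guard_demo():
    if not torch.cuda.is_available():
        return
    attn = AttentionNew(d_key=4, drop_ratio=0.5, causal=False)
    q, k, v = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    attn(q, k, v)  # matches the traced layout, runs the Triton kernels
    try:
        attn(torch.rand(2, 4, 4, 4, device='cuda'), k, v)
    except Exception:
        pass  # guard fired: the compiled path only accepts the traced shape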
|
xurantju/densecap
|
Attention
| false | 11,049 |
[
"BSD-3-Clause"
] | 0 |
2e58501e453bf98b9cc892e5b64997f5c1dfc808
|
https://github.com/xurantju/densecap/tree/2e58501e453bf98b9cc892e5b64997f5c1dfc808
|
MultiHead
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_3, [-1], True), kwargs = {})
# %sub_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_3, %amax_default_3), kwargs = {})
# %div_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_3, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_3,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mk/cmkim2hc4ksxhatli3y5cu7hoqofxcbzqrrxvnlhmswdt4cgww25.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %bmm_3, %bmm_5, %bmm_7], -1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x1), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
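# `call` below runs the four attention heads explicitly: the fused Q/K/V
# projections land in buf0/buf1/buf2, and each head reads them through
# reinterpret_tensor views that differ only in their starting offset (0-3).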
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [query], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [key], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [value], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6)
buf7 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [dot_products_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10)
buf11 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [dot_products_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14)
buf15 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [dot_products_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf15, buf16, 64, grid=grid(64), stream=stream0)
buf17 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf16, buf17, 64, grid=grid(64), stream=stream0)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18)
buf19 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf6, buf10, buf14, buf18, buf19, 64, grid=grid(64), stream=stream0)
del buf10
del buf14
del buf18
del buf6
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf20)
return (reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), buf5, buf9, buf13, buf17, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0), )
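# Only the first returned tensor is the module output; the remaining views
# (projected inputs, per-head softmax buffers, transposed weight views) are
# kept alive for the backward pass.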
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data.distributed
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
    def forward(self, query, key, value):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            # Upper-triangular mask of large finite values; keeping INF
            # finite avoids 0 * inf = nan on the unmasked lower triangle.
            INF = 1e10
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
            if key.is_cuda:
                tri = tri.to(key.device)
            dot_products.data.sub_(tri.unsqueeze(0))
        return matmul(self.dropout(F.softmax(dot_products / self.scale,
            dim=-1)), value)
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal)
self.wq = nn.Linear(d_key, d_key, bias=False)
self.wk = nn.Linear(d_key, d_key, bias=False)
self.wv = nn.Linear(d_value, d_value, bias=False)
self.wo = nn.Linear(d_value, d_key, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key,
value))
return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in
zip(query, key, value)], -1))
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'d_key': 4, 'd_value': 4, 'n_heads': 4, 'drop_ratio': 0.5}]
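def _example_multihead_forward():
    # Hedged usage sketch (not part of the original repo): runs MultiHead on
    # the shapes produced by get_inputs(); with n_heads=4 and d_key=4, each
    # head attends over d_key // n_heads = 1 channel.
    mh = MultiHead(d_key=4, d_value=4, n_heads=4, drop_ratio=0.5)
    query, key, value = get_inputs()
    return mh(query, key, value)  # shape: (4, 4, 4)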
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
import torch.nn.functional as F
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
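# First softmax pass: subtract the 4-wide row max, then multiply by 0.5
# before exponentiating -- the 0.5 is 1 / scale with scale = sqrt(d_key) = 2
# already folded into the kernel.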
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
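# The cat kernel interleaves the four (4, 4, 1) per-head outputs along the
# last dimension: x0 (the lane index 0-3) selects which head buffer feeds
# each output element.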
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 0), out=buf6)
buf7 = buf4
del buf4
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 1), out=buf10)
buf11 = buf8
del buf8
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = buf11
del buf11
triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 2), out=buf14)
buf15 = buf12
del buf12
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = buf15
del buf15
triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 3), out=buf18)
buf19 = buf16
del buf16
triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf10
del buf14
del buf18
del buf6
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf20)
return reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), buf5, buf9, buf13, buf17, reinterpret_tensor(buf19, (16, 4), (4,
1), 0), primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0)
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
    def forward(self, query, key, value):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            # Upper-triangular mask of large finite values; keeping INF
            # finite avoids 0 * inf = nan on the unmasked lower triangle.
            INF = 1e10
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * INF
            if key.is_cuda:
                tri = tri.to(key.device)
            dot_products.data.sub_(tri.unsqueeze(0))
        return matmul(self.dropout(F.softmax(dot_products / self.scale,
            dim=-1)), value)
class MultiHeadNew(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal)
self.wq = nn.Linear(d_key, d_key, bias=False)
self.wk = nn.Linear(d_key, d_key, bias=False)
self.wv = nn.Linear(d_value, d_value, bias=False)
self.wo = nn.Linear(d_value, d_key, bias=False)
self.n_heads = n_heads
def forward(self, input_0, input_1, input_2):
primals_1 = self.wq.weight
primals_3 = self.wk.weight
primals_5 = self.wv.weight
primals_7 = self.wo.weight
primals_2 = input_0
primals_4 = input_1
primals_6 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
xurantju/densecap
|
MultiHead
| false | 11,050 |
[
"BSD-3-Clause"
] | 0 |
2e58501e453bf98b9cc892e5b64997f5c1dfc808
|
https://github.com/xurantju/densecap/tree/2e58501e453bf98b9cc892e5b64997f5c1dfc808
|
CosineBasisLinear
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/vm/cvmxb4fm3754gb5xbvn445attxbnlnqmgxmnql4g235hqqjxgk4t.py
# Topologically Sorted Source Nodes: [arange, i_pi, mul_1, embedding], Original ATen: [aten.arange, aten.mul, aten.cos]
# Source node to ATen node mapping:
# arange => add, convert_element_type, iota, mul
# embedding => cos
# i_pi => mul_1
# mul_1 => mul_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 3.141592653589793), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %mul_1), kwargs = {})
# %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%mul_2,), kwargs = {})
triton_poi_fused_arange_cos_mul_0 = async_compile.triton('triton_poi_fused_arange_cos_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_cos_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_cos_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 1 + x0
tmp2 = tmp1.to(tl.float32)
tmp3 = 3.141592653589793
tmp4 = tmp2 * tmp3
tmp5 = tmp0 * tmp4
tmp6 = tl_math.cos(tmp5)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
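# The kernel fuses the arange/mul/cos chain into one pass: for input element
# x and lane j in [0, 4), it writes cos(x * (j + 1) * pi), i.e. the cosine
# basis embedding with n_basis_functions = 4.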
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [arange, i_pi, mul_1, embedding], Original ATen: [aten.arange, aten.mul, aten.cos]
stream0 = get_raw_stream(0)
triton_poi_fused_arange_cos_mul_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (256, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
return (reinterpret_tensor(buf1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf0, (256, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import numpy as np
from torch import nn
def cosine_basis_functions(x, n_basis_functions=64):
"""Cosine basis functions used to embed quantile thresholds.
Args:
x (torch.Tensor): Input.
n_basis_functions (int): Number of cosine basis functions.
Returns:
        torch.Tensor: Embedding with shape (x.shape + (n_basis_functions,)).
"""
i_pi = torch.arange(1, n_basis_functions + 1, dtype=torch.float, device
=x.device) * np.pi
embedding = torch.cos(x[..., None] * i_pi)
assert embedding.shape == x.shape + (n_basis_functions,)
return embedding
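def _example_cosine_embedding():
    # Hedged sketch (illustrative only): a (2, 3) input with 8 basis
    # functions yields a (2, 3, 8) embedding, cos(x * (i + 1) * pi) along
    # the new trailing axis.
    x = torch.rand(2, 3)
    return cosine_basis_functions(x, n_basis_functions=8)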
class CosineBasisLinear(nn.Module):
"""Linear layer following cosine basis functions.
Args:
n_basis_functions (int): Number of cosine basis functions.
out_size (int): Output size.
"""
def __init__(self, n_basis_functions, out_size):
super().__init__()
self.linear = nn.Linear(n_basis_functions, out_size)
self.n_basis_functions = n_basis_functions
self.out_size = out_size
def forward(self, x):
"""Evaluate.
Args:
x (torch.Tensor): Input.
Returns:
            torch.Tensor: Output with shape (x.shape + (out_size,)).
"""
h = cosine_basis_functions(x, self.n_basis_functions)
h = h.reshape(-1, self.n_basis_functions)
out = self.linear(h)
out = out.reshape(*x.shape, self.out_size)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_basis_functions': 4, 'out_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_arange_cos_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = 1 + x0
tmp2 = tmp1.to(tl.float32)
tmp3 = 3.141592653589793
tmp4 = tmp2 * tmp3
tmp5 = tmp0 * tmp4
tmp6 = tl_math.cos(tmp5)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_arange_cos_mul_0[grid(1024)](primals_1, buf0, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
return reinterpret_tensor(buf1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (256, 4), (4, 1), 0)
def cosine_basis_functions(x, n_basis_functions=64):
"""Cosine basis functions used to embed quantile thresholds.
Args:
x (torch.Tensor): Input.
n_basis_functions (int): Number of cosine basis functions.
Returns:
        torch.Tensor: Embedding with shape (x.shape + (n_basis_functions,)).
"""
i_pi = torch.arange(1, n_basis_functions + 1, dtype=torch.float, device
=x.device) * np.pi
embedding = torch.cos(x[..., None] * i_pi)
assert embedding.shape == x.shape + (n_basis_functions,)
return embedding
class CosineBasisLinearNew(nn.Module):
"""Linear layer following cosine basis functions.
Args:
n_basis_functions (int): Number of cosine basis functions.
out_size (int): Output size.
"""
def __init__(self, n_basis_functions, out_size):
super().__init__()
self.linear = nn.Linear(n_basis_functions, out_size)
self.n_basis_functions = n_basis_functions
self.out_size = out_size
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
tarokiritani/pfrl
|
CosineBasisLinear
| false | 11,051 |
[
"MIT"
] | 0 |
284ed1f43b32654a2ec1569b16a0f6b9acbd5e79
|
https://github.com/tarokiritani/pfrl/tree/284ed1f43b32654a2ec1569b16a0f6b9acbd5e79
|
EncoderLayer
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_3, [-1], True), kwargs = {})
# %sub_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_3, %amax_default_3), kwargs = {})
# %div_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_3, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_3,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mk/cmkim2hc4ksxhatli3y5cu7hoqofxcbzqrrxvnlhmswdt4cgww25.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %bmm_3, %bmm_5, %bmm_7], -1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x1), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/7f/c7fwok6q7j5rvjs3ob32s2cth5xjbedhynzb5ozchylog57bhmxv.py
# Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std]
# Source node to ATen node mapping:
# add => add
# mean => mean
# std => var
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view_31), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add, [-1]), kwargs = {correction: 1.0, keepdim: True})
triton_poi_fused_add_mean_std_3 = async_compile.triton('triton_poi_fused_add_mean_std_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_std_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(in_out_ptr0 + (x0), tmp29, xmask)
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
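# The kernel above computes LayerNorm statistics for the residual sum
# (input + attention output) in one pass: a 4-wide row mean and the unbiased
# variance (sum of squared deviations / 3, matching correction=1.0).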
# kernel path: runs/run_shard_8/inductor_cache/dw/cdwd24bmovp4kvuenv3jq6ffpahgl34iziauouexc57lxivmzubp.py
# Topologically Sorted Source Nodes: [add, mean, std, sub, mul, add_1, truediv_4, add_2], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# mean => mean
# mul => mul
# std => sqrt
# sub => sub_4
# truediv_4 => div_8
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view_31), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %sub_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_8, %primals_7), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_4 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
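# This kernel applies the normalization itself:
#   out = gain * (x - mean) / (sqrt(var) + 1e-6) + bias
# matching a LayerNorm variant that adds eps to the std rather than to the
# variance.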
# kernel path: runs/run_shard_8/inductor_cache/va/cvayouropyisaprtjrhemadbdvsels72axdjsrgmbayknhu335yc.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_33,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/dg/cdg2dxfjk7prchu44e4cgkid2y4524hl5vpyijgt6dwrnsrwzz2k.py
# Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_3 => add_3
# Graph fragment:
# %add_3 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_35), kwargs = {})
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/j4/cj4wrybpym5umgwi5ropl654n64ptcknq2hunhzirmo6b5jmhqyj.py
# Topologically Sorted Source Nodes: [mean_2, std_2, sub_1, mul_1, add_4, truediv_5, add_5], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add_4 => add_4
# add_5 => add_5
# mean_2 => mean_1
# mul_1 => mul_1
# std_2 => sqrt_1, var_1
# sub_1 => sub_5
# truediv_5 => div_9
# Graph fragment:
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_3, [-1], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add_3, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var_1,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %mean_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_12, %sub_5), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt_1, 1e-06), kwargs = {})
# %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_4), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_9, %primals_13), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_7 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [query], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [key], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [value], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6)
buf7 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [dot_products_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10)
buf11 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [dot_products_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14)
buf15 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [dot_products_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf15, buf16, 64, grid=grid(64), stream=stream0)
buf17 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf16, buf17, 64, grid=grid(64), stream=stream0)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18)
buf19 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf6, buf10, buf14, buf18, buf19, 64, grid=grid(64), stream=stream0)
del buf10
del buf14
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf20)
buf21 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0); del buf6 # reuse
buf22 = buf21; del buf21 # reuse
buf23 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std]
triton_poi_fused_add_mean_std_3.run(buf22, primals_2, buf20, buf23, 16, grid=grid(16), stream=stream0)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, mean, std, sub, mul, add_1, truediv_4, add_2], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.mul, aten.div]
triton_poi_fused_add_div_mean_mul_std_sub_4.run(primals_6, primals_2, buf20, buf23, buf22, primals_7, buf24, 64, grid=grid(64), stream=stream0)
del buf22
del buf23
del primals_7
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0); del buf25 # reuse
buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf26, primals_9, buf30, 64, grid=grid(64), stream=stream0)
del primals_9
buf27 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf27)
buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0); del buf27 # reuse
# Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf28, buf24, primals_11, 64, grid=grid(64), stream=stream0)
del primals_11
buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_2, std_2, sub_1, mul_1, add_4, truediv_5, add_5], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
triton_poi_fused_add_div_mean_mul_std_sub_7.run(primals_12, buf28, primals_13, buf29, 64, grid=grid(64), stream=stream0)
del primals_13
return (buf29, primals_2, primals_6, primals_12, buf5, buf9, buf13, buf17, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf20, reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(buf26, (16, 4), (4, 1), 0), buf28, primals_10, buf30, primals_8, primals_5, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data.distributed
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
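# A minimal usage sketch (not part of the original module): when the
# operand ranks differ by one, `matmul` lifts the lower-rank tensor to a
# row vector, multiplies, and squeezes the extra dimension back out:
#     x = torch.rand(4, 4)       # (batch, d)
#     y = torch.rand(4, 4, 4)    # (batch, d, d)
#     matmul(x, y).shape         # -> torch.Size([4, 4])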
class FeedForward(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.linear1 = nn.Linear(d_model, d_hidden)
self.linear2 = nn.Linear(d_hidden, d_model)
def forward(self, x):
return self.linear2(F.relu(self.linear1(x)))
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
    def forward(self, query, key, value):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            # Mask future positions with a large finite bias so they softmax
            # to ~0; a finite constant avoids 0 * inf = NaN below the diagonal.
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * 1e10
            if key.is_cuda:
                tri = tri.cuda()
            dot_products.data.sub_(tri.unsqueeze(0))
        return matmul(self.dropout(
            F.softmax(dot_products / self.scale, dim=-1)), value)
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal)
self.wq = nn.Linear(d_key, d_key, bias=False)
self.wk = nn.Linear(d_key, d_key, bias=False)
self.wv = nn.Linear(d_value, d_value, bias=False)
self.wo = nn.Linear(d_value, d_key, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key,
value))
return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in
zip(query, key, value)], -1))
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
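# Note: torch.Tensor.std defaults to the Bessel-corrected (unbiased)
# estimator, dividing by n - 1; this is why the fused kernels below divide
# the summed squared deviations by 3.0 for a feature size of 4.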
class ResidualBlock(nn.Module):
def __init__(self, layer, d_model, drop_ratio):
super().__init__()
self.layer = layer
self.dropout = nn.Dropout(drop_ratio)
self.layernorm = LayerNorm(d_model)
def forward(self, *x):
return self.layernorm(x[0] + self.dropout(self.layer(*x)))
class EncoderLayer(nn.Module):
def __init__(self, d_model, d_hidden, n_heads, drop_ratio):
super().__init__()
self.selfattn = ResidualBlock(MultiHead(d_model, d_model, n_heads,
drop_ratio), d_model, drop_ratio)
self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden),
d_model, drop_ratio)
def forward(self, x):
return self.feedforward(self.selfattn(x, x, x))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_hidden': 4, 'n_heads': 4, 'drop_ratio': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
import torch.nn.functional as F
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
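# Taken together, the two kernels above compute a numerically stable
# row-wise softmax of the scaled scores: kernel 0 subtracts the row
# maximum, applies the fused 1/scale factor (0.5 for d_key = 4), and
# exponentiates; kernel 1 normalizes by the row sum. Equivalent eager
# form: F.softmax(dot_products / 2.0, dim=-1).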
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
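# This kernel realizes torch.cat([...], -1) from MultiHead.forward,
# interleaving the four per-head (4, 4, 1) attention outputs into one
# (4, 4, 4) tensor along the feature dimension.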
@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(in_out_ptr0 + x0, tmp29, xmask)
tl.store(out_ptr0 + x0, tmp16, xmask)
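# This kernel fuses the first residual add (input + attention output) with
# the row statistics needed by LayerNorm: in_out_ptr0 receives the unbiased
# variance (squared deviations summed and divided by 3.0) and out_ptr0 the
# mean, each taken over the 4-element feature dimension.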
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
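# This kernel finishes the first LayerNorm: it re-derives the residual sum
# (in_ptr1 + in_ptr2), centers it with the precomputed mean (in_ptr3),
# scales by gamma (in_ptr0), divides by sqrt(var) + 1e-06 using the
# precomputed variance (in_ptr4), and adds beta (in_ptr5).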
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
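# Fused ReLU for the feed-forward hidden layer: the activation is written
# back in place, while out_ptr0 records the (output <= 0) mask consumed by
# threshold_backward in the autograd pass.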
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
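# Fuses the linear2 bias add with the second residual connection:
# in_out_ptr0 holds the feed-forward output, in_ptr1 its bias, and in_ptr0
# the residual input from the first LayerNorm.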
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
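# Kernel 7 is the second LayerNorm fused into a single pass. A reference
# sketch of the same computation in eager PyTorch (x being the (4, 4, 4)
# residual sum, gamma/beta the affine parameters):
#     mean = x.mean(-1, keepdim=True)
#     std = x.std(-1, keepdim=True)   # unbiased: divides by n - 1
#     out = gamma * (x - mean) / (std + 1e-06) + beta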
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 0), out=buf6)
buf7 = buf4
del buf4
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 1), out=buf10)
buf11 = buf8
del buf8
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = buf11
del buf11
triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 2), out=buf14)
buf15 = buf12
del buf12
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = buf15
del buf15
triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 3), out=buf18)
buf19 = buf16
del buf16
triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf10
del buf14
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf20)
buf21 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0)
del buf6
buf22 = buf21
del buf21
buf23 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0)
del buf18
triton_poi_fused_add_mean_std_3[grid(16)](buf22, primals_2, buf20,
buf23, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_6,
primals_2, buf20, buf23, buf22, primals_7, buf24, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf22
del buf23
del primals_7
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf25)
buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0)
del buf25
buf30 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf26,
primals_9, buf30, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf27 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf27)
buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0)
del buf27
triton_poi_fused_add_6[grid(64)](buf28, buf24, primals_11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_12,
buf28, primals_13, buf29, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_13
return (buf29, primals_2, primals_6, primals_12, buf5, buf9, buf13,
buf17, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf20,
reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(
buf26, (16, 4), (4, 1), 0), buf28, primals_10, buf30, primals_8,
primals_5, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0))
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class FeedForward(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.linear1 = nn.Linear(d_model, d_hidden)
self.linear2 = nn.Linear(d_hidden, d_model)
def forward(self, x):
return self.linear2(F.relu(self.linear1(x)))
class Attention(nn.Module):
def __init__(self, d_key, drop_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(drop_ratio)
self.causal = causal
    def forward(self, query, key, value):
        dot_products = matmul(query, key.transpose(1, 2))
        if query.dim() == 3 and self.causal:
            # Mask future positions with a large finite bias so they softmax
            # to ~0; a finite constant avoids 0 * inf = NaN below the diagonal.
            tri = torch.ones(key.size(1), key.size(1)).triu(1) * 1e10
            if key.is_cuda:
                tri = tri.cuda()
            dot_products.data.sub_(tri.unsqueeze(0))
        return matmul(self.dropout(
            F.softmax(dot_products / self.scale, dim=-1)), value)
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, drop_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, drop_ratio, causal=causal)
self.wq = nn.Linear(d_key, d_key, bias=False)
self.wk = nn.Linear(d_key, d_key, bias=False)
self.wv = nn.Linear(d_value, d_value, bias=False)
self.wo = nn.Linear(d_value, d_key, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key,
value))
return self.wo(torch.cat([self.attention(q, k, v) for q, k, v in
zip(query, key, value)], -1))
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
class ResidualBlock(nn.Module):
def __init__(self, layer, d_model, drop_ratio):
super().__init__()
self.layer = layer
self.dropout = nn.Dropout(drop_ratio)
self.layernorm = LayerNorm(d_model)
def forward(self, *x):
return self.layernorm(x[0] + self.dropout(self.layer(*x)))
class EncoderLayerNew(nn.Module):
def __init__(self, d_model, d_hidden, n_heads, drop_ratio):
super().__init__()
self.selfattn = ResidualBlock(MultiHead(d_model, d_model, n_heads,
drop_ratio), d_model, drop_ratio)
self.feedforward = ResidualBlock(FeedForward(d_model, d_hidden),
d_model, drop_ratio)
def forward(self, input_0):
primals_1 = self.selfattn.layer.wq.weight
primals_3 = self.selfattn.layer.wk.weight
primals_4 = self.selfattn.layer.wv.weight
primals_5 = self.selfattn.layer.wo.weight
primals_6 = self.selfattn.layernorm.gamma
primals_7 = self.selfattn.layernorm.beta
primals_8 = self.feedforward.layer.linear1.weight
primals_9 = self.feedforward.layer.linear1.bias
primals_10 = self.feedforward.layer.linear2.weight
primals_11 = self.feedforward.layer.linear2.bias
primals_12 = self.feedforward.layernorm.gamma
primals_13 = self.feedforward.layernorm.beta
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
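# A minimal usage sketch (hypothetical; the module and input must live on
# CUDA, and the traced graph contains no dropout kernels, so it matches
# eval-mode behaviour):
#     m = EncoderLayerNew(4, 4, 4, 0.5).cuda().eval()
#     y = m(torch.rand(4, 4, 4, device='cuda'))   # same shape as the input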
|
xurantju/densecap
|
EncoderLayer
| false | 11,052 |
[
"BSD-3-Clause"
] | 0 |
2e58501e453bf98b9cc892e5b64997f5c1dfc808
|
https://github.com/xurantju/densecap/tree/2e58501e453bf98b9cc892e5b64997f5c1dfc808
|
BertOutput
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ss/cssn3ayzwsxbizosd6ieezxafjef3fxscx57lbnlxbdiuph3p2je.py
# Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add
# u => mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
triton_poi_fused_add_mean_0 = async_compile.triton('triton_poi_fused_add_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/l6/cl6vibrzoyykzmbhmvlsdaksh3k2diif7eg66z2ho46tjsy6emma.py
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
# Source node to ATen node mapping:
# add => add
# sub => sub
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
# %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
triton_poi_fused_add_sub_1 = async_compile.triton('triton_poi_fused_add_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/6d/c6dv445enwddy5hcfn3p4vvjynq4oa2vnla2acdsag6hrxww6urd.py
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# hidden_states_2 => add_2
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sqrt => sqrt
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
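# This kernel evaluates the TF-style LayerNorm in one pass: it squares the
# already-centered values (x - u) from the previous kernel, averages them
# over the feature dimension (biased, divided by 4.0), adds eps inside the
# square root, then applies the weight/bias affine transform.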
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_0.run(buf0, primals_2, primals_4, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
triton_poi_fused_add_sub_1.run(buf2, primals_2, primals_4, buf1, 256, grid=grid(256), stream=stream0)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_pow_sqrt_2.run(primals_5, buf2, primals_6, buf3, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.utils.data
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root)."""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
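# Note: unlike torch.Tensor.std, `.pow(2).mean(-1)` is the biased variance
# (divides by n, not n - 1), and the epsilon sits inside the square root;
# both choices match the / 4.0 and sqrt(s + eps) in the fused kernel above.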
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-05)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4,
hidden_dropout_prob=0.5)}]
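# A minimal usage sketch (hypothetical, built from the helpers above):
#     cfg = _mock_config(intermediate_size=4, hidden_size=4,
#                        hidden_dropout_prob=0.5)
#     layer = BertOutput(cfg)
#     out = layer(*get_inputs())   # -> shape (4, 4, 4, 4)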
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
    tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + 2)
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
    tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + 3)
    tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
    tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
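# Editor's note (reading of the generated code): this kernel finishes
# `hidden_states + input_tensor` (dense bias included) and subtracts the
# per-row mean computed by the kernel above, so buf2 ends up holding the
# mean-centred layernorm input in place.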
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
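# Editor's note (reading of the generated code): the centred values live in
# in_ptr1 (buf2) by this point, so this kernel completes the layernorm as
# weight * (xc / sqrt(mean(xc^2) + 1e-05)) + bias, with the mean-of-squares
# reduction unrolled over the last dimension of size 4.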
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5,
buf2, primals_6, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
    return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
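# Editor's note: buf0 is the (64, 4) matmul output; inductor reinterprets its
# storage as the (4, 4, 4, 4) buf2 instead of allocating a new tensor, and the
# extra returned tensors (primals_5, the reshaped primals_3, buf2) are kept
# for the backward pass of this graph.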
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root)."""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertOutputNew(nn.Module):
def __init__(self, config):
super(BertOutputNew, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-05)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_5 = self.LayerNorm.weight
primals_6 = self.LayerNorm.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
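# Editor's note: BertOutputNew is the compiled drop-in for BertOutput above;
# it feeds the module parameters and inputs through call() and returns only
# the first output. No dropout kernel appears in the compiled graph, so this
# path matches the eval-mode (dropout-off) behaviour of the original module.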
|
zdxdsw/WebQA_VLP | BertOutput | false | 11,053 | ["Apache-2.0"] | 0 | 443bcd7e9b36db47e2ab4502abaaa3724800f394 | https://github.com/zdxdsw/WebQA_VLP/tree/443bcd7e9b36db47e2ab4502abaaa3724800f394
|
CNN
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/36/c36goqekbheqmzqx63ibehvw5xzi6nve5f33bertb3dmpfgep4fh.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/tk/ctkr5j63ngqpsdf5zpn24uwouxiguv7jseip5x6trqjye5gti4rn.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/rd/crdjuxm6zhlm4urook3ern4n6hhl5gi3jy25rg4to3kivhrpktom.py
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf7, primals_7, 65536, grid=grid(65536), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf9 = buf8; del buf8 # reuse
buf10 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_3.run(buf9, primals_9, buf10, 65536, grid=grid(65536), stream=stream0)
del primals_9
return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf4, buf5, buf7, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self, last_layer_channels):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, (3, 3), padding='same')
self.conv2 = nn.Conv2d(32, 32, (3, 3), padding='same')
self.pool1 = nn.MaxPool2d(2, 2)
self.conv3 = nn.Conv2d(32, 16, (3, 3), padding='same')
self.last_layer_channels = 16
self.conv4 = nn.Conv2d(16, self.last_layer_channels, (3, 3),
padding='same')
self.pool2 = nn.MaxPool2d(2, 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool1(F.relu(self.conv2(x)))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
return x
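# Editor's shape trace (matches the asserts in the compiled wrappers):
# (N, 1, 64, 64) -> conv1/conv2 + ReLU -> (N, 32, 64, 64) -> pool1 ->
# (N, 32, 32, 32) -> conv3/conv4 + ReLU -> (N, 16, 32, 32).
# Note: the `last_layer_channels` argument is effectively ignored (it is
# overwritten with 16), and `pool2` is defined but never used in forward().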
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {'last_layer_channels': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
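# Editor's note: out_ptr0 above stores the boolean `output <= 0` mask;
# inductor keeps it (buf10 below) so aten.threshold_backward can replay the
# final ReLU's gradient without recomputing the activation.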
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(524288)](buf3, primals_5,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf3, buf4,
buf5, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(65536)](buf7, primals_7,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(65536)](
buf9, primals_9, buf10, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_9
return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf4, buf5, buf7, buf10)
class CNNNew(nn.Module):
def __init__(self, last_layer_channels):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, (3, 3), padding='same')
self.conv2 = nn.Conv2d(32, 32, (3, 3), padding='same')
self.pool1 = nn.MaxPool2d(2, 2)
self.conv3 = nn.Conv2d(32, 16, (3, 3), padding='same')
self.last_layer_channels = 16
self.conv4 = nn.Conv2d(16, self.last_layer_channels, (3, 3),
padding='same')
self.pool2 = nn.MaxPool2d(2, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
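# Editor's note: CNNNew mirrors CNN but routes forward() through the compiled
# call(); output[0] is the final activation, while the remaining returned
# buffers (weights, input, intermediates, pooling indices, and the ReLU mask)
# are kept for the backward pass.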
|
zitkat/transformer-HTR | CNN | false | 11,054 | ["Apache-2.0"] | 0 | fa14dc99f1050c022cd54bc82abe9bc8dbfbc95a | https://github.com/zitkat/transformer-HTR/tree/fa14dc99f1050c022cd54bc82abe9bc8dbfbc95a
|
CCCLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/2l/c2lumpq6tnnnhy7ek6bzpvqbvoeswddoefom3hhivyzjjsxtatvo.py
# Topologically Sorted Source Nodes: [mean, vx, mean_1, vy, mul, sum_1, pow_1, sum_2, sqrt, pow_2, sum_3, sqrt_1, mul_1, add, rho, mul_2, x_s, mul_3, y_s, mul_4, pow_3, pow_4, add_1, x_m, y_m, sub_2, pow_5, add_2, add_3, ccc, loss, mul_5], Original ATen: [aten.mean, aten.sub, aten.mul, aten.sum, aten.pow, aten.sqrt, aten.add, aten.div, aten.std, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# ccc => div_1
# loss => sub_3
# mean => mean
# mean_1 => mean_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# pow_5 => pow_5
# rho => div
# sqrt => sqrt
# sqrt_1 => sqrt_1
# sub_2 => sub_2
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# vx => sub
# vy => sub_1
# x_m => mean_2
# x_s => sqrt_2, var
# y_m => mean_3
# y_s => sqrt_3, var_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%arg1_1,), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mean), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%arg0_1,), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_2,), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_2,), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_3,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, %sqrt_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-10), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 2), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%arg1_1,), kwargs = {correction: 1.0})
# %sqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %sqrt_2), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%arg0_1,), kwargs = {correction: 1.0})
# %sqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%var_1,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sqrt_3), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sqrt_2, 2), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sqrt_3, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, %pow_4), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%arg1_1,), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%arg0_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean_2, %mean_3), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %pow_5), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1e-10), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_4, %add_3), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 1.0), kwargs = {})
triton_per_fused_add_div_mean_mul_pow_rsub_sqrt_std_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_pow_rsub_sqrt_std_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_pow_rsub_sqrt_std_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 13, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_rsub_sqrt_std_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp4 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp3 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp7 / tmp8
tmp12 = tmp4 - tmp11
tmp13 = tmp10 * tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp10 * tmp10
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp12 * tmp12
tmp22 = tl.broadcast_to(tmp21, [RBLOCK])
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0))
tmp26 = tl.broadcast_to(tmp1, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tmp29 = tl.full([1], 256, tl.int32)
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp28 / tmp30
tmp32 = tmp1 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tl.broadcast_to(tmp33, [RBLOCK])
tmp36 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0))
tmp38 = tl.broadcast_to(tmp5, [RBLOCK])
tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0))
tmp41 = tmp40 / tmp30
tmp42 = tmp5 - tmp41
tmp43 = tmp42 * tmp42
tmp44 = tl.broadcast_to(tmp43, [RBLOCK])
tmp46 = triton_helpers.promote_to_tensor(tl.sum(tmp44, 0))
tmp47 = libdevice.sqrt(tmp20)
tmp48 = libdevice.sqrt(tmp24)
tmp49 = tmp47 * tmp48
tmp50 = 1e-10
tmp51 = tmp49 + tmp50
tmp52 = tmp16 / tmp51
tmp53 = 2.0
tmp54 = tmp52 * tmp53
tmp55 = 255.0
tmp56 = tmp36 / tmp55
tmp57 = libdevice.sqrt(tmp56)
tmp58 = tmp54 * tmp57
tmp59 = tmp46 / tmp55
tmp60 = libdevice.sqrt(tmp59)
tmp61 = tmp58 * tmp60
tmp62 = tmp57 * tmp57
tmp63 = tmp60 * tmp60
tmp64 = tmp62 + tmp63
tmp65 = tmp9 - tmp11
tmp66 = tmp65 * tmp65
tmp67 = tmp64 + tmp66
tmp68 = tmp67 + tmp50
tmp69 = tmp61 / tmp68
tmp70 = 1.0
tmp71 = tmp70 - tmp69
tmp72 = tmp71 * tmp70
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp72, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf11 = empty_strided_cuda((), (), torch.float32)
buf13 = buf11; del buf11 # reuse
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [mean, vx, mean_1, vy, mul, sum_1, pow_1, sum_2, sqrt, pow_2, sum_3, sqrt_1, mul_1, add, rho, mul_2, x_s, mul_3, y_s, mul_4, pow_3, pow_4, add_1, x_m, y_m, sub_2, pow_5, add_2, add_3, ccc, loss, mul_5], Original ATen: [aten.mean, aten.sub, aten.mul, aten.sum, aten.pow, aten.sqrt, aten.add, aten.div, aten.std, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_pow_rsub_sqrt_std_sub_sum_0.run(buf14, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class CCCLoss(nn.Module):
"""CCC loss for VA regression
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def get_name(self):
return 'CCC_loss'
def forward(self, cls_score, reg_label, **kwargs):
x, y = cls_score, reg_label
vx = x - torch.mean(x)
vy = y - torch.mean(y)
        rho = torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx ** 2)) *
            torch.sqrt(torch.sum(vy ** 2)) + 1e-10)
x_m = torch.mean(x)
y_m = torch.mean(y)
x_s = torch.std(x)
y_s = torch.std(y)
ccc = 2 * rho * x_s * y_s / (x_s ** 2 + y_s ** 2 + (x_m - y_m) ** 2 +
1e-10)
loss = 1 - ccc
return loss * self.loss_weight
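# Editor's note: the closed form above is Lin's concordance correlation
# coefficient,
#   CCC = 2 * rho * sigma_x * sigma_y / (sigma_x^2 + sigma_y^2 + (mu_x - mu_y)^2),
# with rho the Pearson correlation. Since CCC lies in [-1, 1], the loss
# 1 - CCC ranges over [0, 2] and is 0 only for a perfect, identically-scaled
# prediction.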
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_rsub_sqrt_std_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp3 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp7 / tmp8
tmp12 = tmp4 - tmp11
tmp13 = tmp10 * tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp10 * tmp10
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp12 * tmp12
tmp22 = tl.broadcast_to(tmp21, [RBLOCK])
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0))
tmp26 = tl.broadcast_to(tmp1, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tmp29 = tl.full([1], 256, tl.int32)
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp28 / tmp30
tmp32 = tmp1 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tl.broadcast_to(tmp33, [RBLOCK])
tmp36 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0))
tmp38 = tl.broadcast_to(tmp5, [RBLOCK])
tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0))
tmp41 = tmp40 / tmp30
tmp42 = tmp5 - tmp41
tmp43 = tmp42 * tmp42
tmp44 = tl.broadcast_to(tmp43, [RBLOCK])
tmp46 = triton_helpers.promote_to_tensor(tl.sum(tmp44, 0))
tmp47 = libdevice.sqrt(tmp20)
tmp48 = libdevice.sqrt(tmp24)
tmp49 = tmp47 * tmp48
tmp50 = 1e-10
tmp51 = tmp49 + tmp50
tmp52 = tmp16 / tmp51
tmp53 = 2.0
tmp54 = tmp52 * tmp53
tmp55 = 255.0
tmp56 = tmp36 / tmp55
tmp57 = libdevice.sqrt(tmp56)
tmp58 = tmp54 * tmp57
tmp59 = tmp46 / tmp55
tmp60 = libdevice.sqrt(tmp59)
tmp61 = tmp58 * tmp60
tmp62 = tmp57 * tmp57
tmp63 = tmp60 * tmp60
tmp64 = tmp62 + tmp63
tmp65 = tmp9 - tmp11
tmp66 = tmp65 * tmp65
tmp67 = tmp64 + tmp66
tmp68 = tmp67 + tmp50
tmp69 = tmp61 / tmp68
tmp70 = 1.0
tmp71 = tmp70 - tmp69
tmp72 = tmp71 * tmp70
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp72, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf11 = empty_strided_cuda((), (), torch.float32)
buf13 = buf11
del buf11
buf14 = buf13
del buf13
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_pow_rsub_sqrt_std_sub_sum_0[grid(1)](
buf14, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
    return (buf14,)
class CCCLossNew(nn.Module):
"""CCC loss for VA regression
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def get_name(self):
return 'CCC_loss'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
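# Editor's note: the fused kernel performs every reduction (both means, the
# Pearson numerator and denominators, both standard deviations) in a single
# persistent-reduction pass over all 256 elements; torch.std's Bessel
# correction shows up as the 255.0 divisor.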
|
youqingxiaozhua/ABAW3 | CCCLoss | false | 11,055 | ["Apache-2.0"] | 0 | 51ab58ab311ecd6603a8485a45af0dcc39880e69 | https://github.com/youqingxiaozhua/ABAW3/tree/51ab58ab311ecd6603a8485a45af0dcc39880e69
|
Unet
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/xf/cxfcqgqzkxpafh7ngi5lmyvtgswx4bwrz3upkkjlnp6wzixzpn5k.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/av/cavhjnlb3jhzdiam6ww4odoew4b7etx4mfdwllv5macorejth2wz.py
# Topologically Sorted Source Nodes: [x_2, iadd, x_3], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd => add
# x_2 => convolution_1
# x_3 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_2, %primals_1), kwargs = {})
# %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor, %add, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_1, %slice_scatter_default, 1, 0, 1), kwargs = {})
# %slice_scatter_default_2 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_12, 1, 0, 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_2, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_2, 0.01), kwargs = {})
# %where_1 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %slice_scatter_default_2, %mul_1), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 32
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 131072)
tmp21 = tl.load(in_ptr0 + (x3), None)
tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x0 + (4096*x2)), tmp3, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0)
tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x0 + (4096*x2)), tmp2, eviction_policy='evict_last', other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + (x3), tmp27, None)
tl.store(out_ptr1 + (x3), tmp30, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ji/cji5l2esho4uvrao5eeomydgbe3hjk4sdot5hvrdbfwiux6vsdse.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_4 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 32), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
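
# Reference sketch (illustrative only; helper name is hypothetical):
# triton_poi_fused_convolution_2 only folds the per-channel bias into the
# convolution output in place -- the grouped, stride-2 convolution named in
# the graph fragment above is expected to run outside this kernel via
# extern_kernels.convolution. Here x1 = (xindex // 1024) % 32 recovers the
# channel index of a contiguous (N, 32, 32, 32) tensor (1024 = 32*32 spatial
# positions per channel).
def _ref_inplace_channel_bias(conv_out, bias):
    # conv_out: contiguous (N, C, H, W); bias: (C,), broadcast over N, H, W.
    conv_out += bias.view(1, -1, 1, 1)
    return conv_out
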
# kernel path: runs/run_shard_8/inductor_cache/42/c42ryamg2fbchnrdanzxiam3yphdwkdejaxnqprizpnvuejwqtar.py
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_5 => convolution_3
# x_6 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.01), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_3, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ag/cageczbpbdq2zvqdke7tpgave3qsnz6g7oqwi32lwbqxmfh6cqgr.py
# Topologically Sorted Source Nodes: [x_7, iadd_1, x_8], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_1 => add_1
# x_7 => convolution_4
# x_8 => gt_3, mul_3, where_3
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_39, %convolution_2), kwargs = {})
# %slice_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_1, %add_1, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_4, %slice_scatter_default_3, 1, 0, 32), kwargs = {})
# %slice_scatter_default_5 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %slice_49, 1, 0, 32), kwargs = {})
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_5, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_5, 0.01), kwargs = {})
# %where_3 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %slice_scatter_default_5, %mul_3), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 1024) % 64
x3 = xindex
x2 = (xindex // 65536)
x4 = xindex % 65536
tmp21 = tl.load(in_ptr0 + (x3), None)
tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 32, tl.int64)
tmp2 = tmp0 < tmp1
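    # tmp2 guards the first 32 output channels, which additionally receive the
    # skip-connection add from in_ptr2 (the slice_scatter chain in the graph
    # fragment above); all other channels take the plain conv + bias path
    # (tmp23 below). tmp3 = tmp2 & tmp2 is a degenerate conjunction of two
    # identical nested slice guards left in by the Inductor lowering.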
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + (32768*x2)), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0)
tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + (32768*x2)), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + (x3), tmp27, None)
tl.store(out_ptr1 + (x3), tmp30, None)
''', device_str='cuda')
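
# Reference sketch (illustrative only, assuming the usual in-place residual
# idiom that lowers to slice_scatter nodes like the ones above): the fused
# kernel performs, in a single elementwise pass,
#     y = conv(x) + bias          # all 64 channels
#     y[:, :32] += skip           # in-place add on the first 32 channels
#     mask, y = y > 0, leaky_relu(y, 0.01)
# The helper below is hypothetical; the channel guard tmp2 = (x1 < 32) in the
# kernel selects which elements receive the skip.
def _ref_partial_channel_residual(conv_out, bias, skip, n_skip_channels=32):
    y = conv_out + bias.view(1, -1, 1, 1)
    y[:, :n_skip_channels] += skip
    return y > 0, torch.nn.functional.leaky_relu(y, negative_slope=0.01)
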
# kernel path: runs/run_shard_8/inductor_cache/wm/cwmx573hgzpqv72fn6et3riktwpbit3or4nztj7ce6dfrczkaddw.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_9 => convolution_5
# Graph fragment:
# %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_12, %primals_13, [2, 2], [0, 0], [1, 1], False, [0, 0], 64), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/3w/c3wcxzpxikyyqdrw6kpbwbywojsljwovzvu54xks2iemn2a2bek6.py
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_10 => convolution_6
# x_11 => gt_4, mul_4, where_4
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_5, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.01), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_6, %mul_4), kwargs = {})
triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uq/cuqzrsdrmc6acrieikbfirzninntl7f3rpxs5iatziafgkhripbr.py
# Topologically Sorted Source Nodes: [x_12, iadd_2, x_13], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_2 => add_2
# x_12 => convolution_7
# x_13 => gt_5, mul_5, where_5
# Graph fragment:
# %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_76, %convolution_5), kwargs = {})
# %slice_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_2, %add_2, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_7, %slice_scatter_default_6, 1, 0, 64), kwargs = {})
# %slice_scatter_default_8 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_7, %slice_86, 1, 0, 64), kwargs = {})
# %gt_5 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_8, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_8, 0.01), kwargs = {})
# %where_5 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %slice_scatter_default_8, %mul_5), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_7 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 128
x3 = xindex
x2 = (xindex // 32768)
x4 = xindex % 32768
tmp21 = tl.load(in_ptr0 + (x3), None)
tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 64, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + (16384*x2)), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0)
tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + (16384*x2)), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + (x3), tmp27, None)
tl.store(out_ptr1 + (x3), tmp30, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/z5/cz5htevaalapzdiv2ghjv26w2a3mj4iej6i77fm4swdcujsblszp.py
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_14 => convolution_8
# Graph fragment:
# %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_5, %primals_18, %primals_19, [2, 2], [0, 0], [1, 1], False, [0, 0], 128), kwargs = {})
triton_poi_fused_convolution_8 = async_compile.triton('triton_poi_fused_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/qm/cqmscszyypzxc4zzgkthiigoojenozafjwojwc52fv6pbxgqrvep.py
# Topologically Sorted Source Nodes: [x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_15 => convolution_9
# x_16 => gt_6, mul_6, where_6
# Graph fragment:
# %convolution_9 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_8, %primals_20, %primals_21, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_6 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_9, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_9, 0.01), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_9, %mul_6), kwargs = {})
triton_poi_fused_convolution_leaky_relu_9 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/pn/cpnmjt3x2nmyv3a3tzn6a3wszlt7xdk4x5wv6ffqsn324kfaib4f.py
# Topologically Sorted Source Nodes: [x_17, iadd_3, x_18], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_3 => add_3
# x_17 => convolution_10
# x_18 => gt_7, mul_7, where_7
# Graph fragment:
# %convolution_10 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_113, %convolution_8), kwargs = {})
# %slice_scatter_default_9 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_3, %add_3, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_10 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_10, %slice_scatter_default_9, 1, 0, 128), kwargs = {})
# %slice_scatter_default_11 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_10, %slice_123, 1, 0, 128), kwargs = {})
# %gt_7 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_11, 0), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_11, 0.01), kwargs = {})
# %where_7 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %slice_scatter_default_11, %mul_7), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_10 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 256
x3 = xindex
x2 = (xindex // 16384)
x4 = xindex % 16384
tmp21 = tl.load(in_ptr0 + (x3), None)
tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 128, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + (8192*x2)), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0)
tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + (8192*x2)), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + (x3), tmp27, None)
tl.store(out_ptr1 + (x3), tmp30, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/pn/cpntx5ingplfviuf3scpf2ploxf2cbj6dppd5twxxapjxxzeb2gw.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_19 => convolution_11
# Graph fragment:
# %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_7, %primals_24, %primals_25, [2, 2], [0, 0], [1, 1], False, [0, 0], 256), kwargs = {})
triton_poi_fused_convolution_11 = async_compile.triton('triton_poi_fused_convolution_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5p/c5pvu73hvxjushlgcdbeq425vx4apnqtyg3qns5wrorg2nbmszzc.py
# Topologically Sorted Source Nodes: [x_20, x_21], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_20 => convolution_12
# x_21 => gt_8, mul_8, where_8
# Graph fragment:
# %convolution_12 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_11, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_8 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_12, 0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_12, 0.01), kwargs = {})
# %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %convolution_12, %mul_8), kwargs = {})
triton_poi_fused_convolution_leaky_relu_12 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/rh/crhinxcz2vvog2xvydnfton7wspkzsg5t3cjjz2xt2wuzhoi354w.py
# Topologically Sorted Source Nodes: [x_22, iadd_4, x_23], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_4 => add_4
# x_22 => convolution_13
# x_23 => gt_9, mul_9, where_9
# Graph fragment:
# %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_8, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_13, %convolution_11), kwargs = {})
# %gt_9 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_4, 0), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, 0.01), kwargs = {})
# %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %add_4, %mul_9), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_13 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x3), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')
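
# Reference sketch (illustrative only; helper name is hypothetical): from this
# stage on the channel counts of the two residual branches match, so the fused
# kernel reduces to a plain full-tensor residual add followed by leaky_relu --
# no channel guard is needed, which is why this kernel is so much shorter than
# the slice_scatter variants above.
def _ref_full_residual(conv_out, bias, skip):
    y = conv_out + bias.view(1, -1, 1, 1) + skip
    return y > 0, torch.nn.functional.leaky_relu(y, negative_slope=0.01)
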
# kernel path: runs/run_shard_8/inductor_cache/xz/cxzdy4wlhiutyuwqhvz5mul3hsest2j4rhjcdy5iicga6ukzem6f.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_25 => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_14, %where_7], 1), kwargs = {})
triton_poi_fused_cat_14 = async_compile.triton('triton_poi_fused_cat_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 512
x0 = xindex % 64
x2 = (xindex // 32768)
x3 = xindex
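    # x1 indexes the 512 output channels of the concatenation: channels
    # [0, 256) come from in_ptr0 plus its per-channel bias in_ptr1 (the
    # preceding convolution_14 output, bias not yet applied), while channels
    # [256, 512) are copied straight from in_ptr2 (the skip tensor where_7).
    # tmp2 and tmp12 are computed but unused -- dead bounds checks left in by
    # the lowering.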
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 512, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (64*((-256) + x1)) + (16384*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
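
# Reference sketch (illustrative only; helper name is hypothetical): the fused
# cat kernel applies the pending bias to convolution_14 and concatenates the
# result with the encoder skip along the channel dimension in one pass,
# instead of materializing the biased tensor first.
def _ref_bias_then_cat(up_out, bias, skip):
    return torch.cat([up_out + bias.view(1, -1, 1, 1), skip], dim=1)
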
# kernel path: runs/run_shard_8/inductor_cache/zt/cztj2a2rppkblw2uusvgftlbe5t6xd2fvbauq357vhaoyj3caget.py
# Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_26 => convolution_15
# x_27 => gt_10, mul_10, where_10
# Graph fragment:
# %convolution_15 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_32, %primals_33, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_10 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_15, 0), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_15, 0.01), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_10, %convolution_15, %mul_10), kwargs = {})
triton_poi_fused_convolution_leaky_relu_15 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_15(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/eq/ceqlawpx2k7y4mlllr6mlufqinhrt5n2lyebv6w73yy753ifmwl4.py
# Topologically Sorted Source Nodes: [x_28, iadd_5, x_29], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_5 => add_5
# x_28 => convolution_16
# x_29 => gt_11, mul_11, where_11
# Graph fragment:
# %convolution_16 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_10, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_16, %slice_181), kwargs = {})
# %gt_11 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_5, 0), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.01), kwargs = {})
# %where_11 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_11, %add_5, %mul_11), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_16 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_16(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
x2 = (xindex // 8192)
x4 = xindex % 8192
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + (32768*x2)), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')
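
# Reference sketch (illustrative only, assuming slice_181 is the first 128
# channels of the 512-channel cat tensor -- which matches the
# in_ptr2 + (x4 + 32768*x2) indexing above): the decoder residual adds the
# conv output back onto a channel slice of the concatenated features. The
# helper and its n_channels parameter are hypothetical.
def _ref_residual_vs_slice(conv_out, bias, cat_x, n_channels=128):
    y = conv_out + bias.view(1, -1, 1, 1) + cat_x[:, :n_channels]
    return y > 0, torch.nn.functional.leaky_relu(y, negative_slope=0.01)
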
# kernel path: runs/run_shard_8/inductor_cache/si/csi3nwzyblsdh2cv7kre43tug6gju4orzk6cf7c7tldsjdjh4fge.py
# Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_31 => cat_1
# Graph fragment:
# %cat_1 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_17, %where_5], 1), kwargs = {})
triton_poi_fused_cat_17 = async_compile.triton('triton_poi_fused_cat_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 256
x0 = xindex % 256
x2 = (xindex // 65536)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 256, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (256*((-128) + x1)) + (32768*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/vg/cvgp54mfma7r4duil452q6fabyts4tskbusq47l54yokhacdvrdo.py
# Topologically Sorted Source Nodes: [x_32, x_33], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_32 => convolution_18
# x_33 => gt_12, mul_12, where_12
# Graph fragment:
# %convolution_18 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_38, %primals_39, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_12 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_18, 0), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_18, 0.01), kwargs = {})
# %where_12 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %convolution_18, %mul_12), kwargs = {})
triton_poi_fused_convolution_leaky_relu_18 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_18(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/bx/cbxupgpftsatjh5mnu5m25yosvv4kcrmlb3ewe5ehw6h62behogi.py
# Topologically Sorted Source Nodes: [x_34, iadd_6, x_35], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_6 => add_6
# x_34 => convolution_19
# x_35 => gt_13, mul_13, where_13
# Graph fragment:
# %convolution_19 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_12, %primals_40, %primals_41, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_19, %slice_210), kwargs = {})
# %gt_13 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_6, 0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 0.01), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_13, %add_6, %mul_13), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_19 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
x2 = (xindex // 16384)
x4 = xindex % 16384
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + (65536*x2)), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/qy/cqyx4ivlwl5e3hnixgpop5rodo77ijdhyu4cykacthp6b6pblqtn.py
# Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_37 => cat_2
# Graph fragment:
# %cat_2 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_20, %where_3], 1), kwargs = {})
triton_poi_fused_cat_20 = async_compile.triton('triton_poi_fused_cat_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 1024) % 128
x0 = xindex % 1024
x2 = (xindex // 131072)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 128, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (1024*((-64) + x1)) + (65536*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
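# Eager-mode sketch (illustrative assumption) of the fusion above: the
# transposed-conv bias add is folded into the channel concat, so the biased
# upsample result is never materialized on its own.
def _eager_sketch_cat_20(up_out, bias, skip):
    return torch.cat([up_out + bias.view(1, -1, 1, 1), skip], dim=1)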
# kernel path: runs/run_shard_8/inductor_cache/go/cgo77j6qmweqj7pb3bvmfnhkd4xbawaqr62ui6dn7unxpajvsiwi.py
# Topologically Sorted Source Nodes: [x_38, x_39], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_38 => convolution_21
# x_39 => gt_14, mul_14, where_14
# Graph fragment:
# %convolution_21 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_44, %primals_45, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_14 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_21, 0), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_21, 0.01), kwargs = {})
# %where_14 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_14, %convolution_21, %mul_14), kwargs = {})
triton_poi_fused_convolution_leaky_relu_21 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_21(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uv/cuvfebza4ywja4afrbp7q4jsfdigha6t7ov3i45g66lzrpeeyydk.py
# Topologically Sorted Source Nodes: [x_40, iadd_7, x_41], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_7 => add_7
# x_40 => convolution_22
# x_41 => gt_15, mul_15, where_15
# Graph fragment:
# %convolution_22 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_14, %primals_46, %primals_47, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_7 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_22, %slice_239), kwargs = {})
# %gt_15 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_7, 0), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 0.01), kwargs = {})
# %where_15 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_15, %add_7, %mul_15), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_22 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_22(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
x2 = (xindex // 32768)
x4 = xindex % 32768
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + (131072*x2)), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/qm/cqmwkfzmikcapbfklwnx5duzt3m2kkiznvbfkik2i6kizaagzljn.py
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_43 => cat_3
# Graph fragment:
# %cat_3 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_23, %where_1], 1), kwargs = {})
triton_poi_fused_cat_23 = async_compile.triton('triton_poi_fused_cat_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 64
x0 = xindex % 4096
x2 = (xindex // 262144)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 64, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (4096*((-32) + x1)) + (131072*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uh/cuhiatd4b3g4fq4hwiu5kyq5ygtth7buqp4idbziynhcgnc5b6kn.py
# Topologically Sorted Source Nodes: [x_44, x_45], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_44 => convolution_24
# x_45 => gt_16, mul_16, where_16
# Graph fragment:
# %convolution_24 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_50, %primals_51, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_16 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_24, 0), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_24, 0.01), kwargs = {})
# %where_16 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_16, %convolution_24, %mul_16), kwargs = {})
triton_poi_fused_convolution_leaky_relu_24 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_24(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 0.0
tmp5 = tmp3 > tmp4
tmp6 = 0.01
tmp7 = tmp3 * tmp6
tmp8 = tl.where(tmp5, tmp3, tmp7)
tl.store(out_ptr0 + (x0), tmp5, None)
tl.store(out_ptr1 + (x0), tmp8, None)
''', device_str='cuda')
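# Because this stage has a single output channel, the bias is a one-element
# tensor: the kernel loads it once (tl.load(in_ptr1 + (0))) and broadcasts it
# across the block instead of the per-channel gather used by the wider
# kernels. A tiny eager sketch of the same computation (illustrative
# assumption):
def _eager_sketch_scalar_bias_24(conv_out, bias):
    y = conv_out + bias        # bias has shape (1,), broadcasts everywhere
    return y > 0, torch.nn.functional.leaky_relu(y, negative_slope=0.01)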
# kernel path: runs/run_shard_8/inductor_cache/hm/chmdwzfjerzpipa7qmmz7fdqjhovmyj2quztbhodv7vkpuqk4l6n.py
# Topologically Sorted Source Nodes: [x_46, iadd_8, x_47], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_8 => add_8
# x_46 => convolution_25
# x_47 => gt_17, mul_17, where_17
# Graph fragment:
# %convolution_25 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_16, %primals_52, %primals_53, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_8 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_25, %slice_268), kwargs = {})
# %gt_17 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_8, 0), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, 0.01), kwargs = {})
# %where_17 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_17, %add_8, %mul_17), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_25 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_25(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
x1 = (xindex // 4096)
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x0 + (262144*x1)), None)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 0.01
tmp9 = tmp5 * tmp8
tmp10 = tl.where(tmp7, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp7, None)
tl.store(out_ptr1 + (x2), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
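# async_compile.wait blocks until every Triton kernel defined above has
# finished compiling; call() below only launches those kernels (plus the
# extern_kernels.convolution ops) in graph order.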
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (32, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (64, ), (1, ))
assert_size_stride(primals_12, (64, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (128, ), (1, ))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128, ), (1, ))
assert_size_stride(primals_18, (128, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_19, (128, ), (1, ))
assert_size_stride(primals_20, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_21, (256, ), (1, ))
assert_size_stride(primals_22, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_23, (256, ), (1, ))
assert_size_stride(primals_24, (256, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_25, (256, ), (1, ))
assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_27, (256, ), (1, ))
assert_size_stride(primals_28, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_29, (256, ), (1, ))
assert_size_stride(primals_30, (256, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_31, (256, ), (1, ))
assert_size_stride(primals_32, (128, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_33, (128, ), (1, ))
assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_35, (128, ), (1, ))
assert_size_stride(primals_36, (128, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (64, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_39, (64, ), (1, ))
assert_size_stride(primals_40, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_41, (64, ), (1, ))
assert_size_stride(primals_42, (64, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_43, (64, ), (1, ))
assert_size_stride(primals_44, (32, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_45, (32, ), (1, ))
assert_size_stride(primals_46, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_47, (32, ), (1, ))
assert_size_stride(primals_48, (32, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_49, (32, ), (1, ))
assert_size_stride(primals_50, (1, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_51, (1, ), (1, ))
assert_size_stride(primals_52, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_53, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 524288, grid=grid(524288), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2, iadd, x_3], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_1.run(buf3, primals_5, primals_1, buf4, buf5, 524288, grid=grid(524288), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=32, bias=None)
assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf7, primals_7, 131072, grid=grid(131072), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
buf10 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_3.run(buf8, primals_9, buf9, buf10, 262144, grid=grid(262144), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf12 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
buf13 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_7, iadd_1, x_8], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_4.run(buf11, primals_11, buf7, buf12, buf13, 262144, grid=grid(262144), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=64, bias=None)
assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf15, primals_13, 65536, grid=grid(65536), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 128, 16, 16), (32768, 256, 16, 1))
buf17 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_6.run(buf16, primals_15, buf17, buf18, 131072, grid=grid(131072), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1))
buf20 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
buf21 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [x_12, iadd_2, x_13], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_7.run(buf19, primals_17, buf15, buf20, buf21, 131072, grid=grid(131072), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, primals_18, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=128, bias=None)
assert_size_stride(buf22, (4, 128, 8, 8), (8192, 64, 8, 1))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
triton_poi_fused_convolution_8.run(buf23, primals_19, 32768, grid=grid(32768), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1))
buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_9.run(buf24, primals_21, buf25, buf26, 65536, grid=grid(65536), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution]
buf27 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 256, 8, 8), (16384, 64, 8, 1))
buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
buf29 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [x_17, iadd_3, x_18], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_10.run(buf27, primals_23, buf23, buf28, buf29, 65536, grid=grid(65536), stream=stream0)
del buf27
del primals_23
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, primals_24, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=256, bias=None)
assert_size_stride(buf30, (4, 256, 4, 4), (4096, 16, 4, 1))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution]
triton_poi_fused_convolution_11.run(buf31, primals_25, 16384, grid=grid(16384), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 256, 4, 4), (4096, 16, 4, 1))
buf33 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool)
buf34 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_20, x_21], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_12.run(buf32, primals_27, buf33, buf34, 16384, grid=grid(16384), stream=stream0)
del primals_27
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution]
buf35 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 256, 4, 4), (4096, 16, 4, 1))
buf36 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool)
buf37 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [x_22, iadd_4, x_23], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_13.run(buf35, primals_29, buf31, buf36, buf37, 16384, grid=grid(16384), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_30, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=256, bias=None)
assert_size_stride(buf38, (4, 256, 8, 8), (16384, 64, 8, 1))
buf39 = reinterpret_tensor(buf19, (4, 512, 8, 8), (32768, 64, 8, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf38, primals_31, buf29, buf39, 131072, grid=grid(131072), stream=stream0)
del primals_31
# Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution]
buf40 = extern_kernels.convolution(buf39, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 128, 8, 8), (8192, 64, 8, 1))
buf41 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool)
buf42 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_15.run(buf40, primals_33, buf41, buf42, 32768, grid=grid(32768), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution]
buf43 = extern_kernels.convolution(buf42, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1))
buf44 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool)
buf45 = buf40; del buf40 # reuse
# Topologically Sorted Source Nodes: [x_28, iadd_5, x_29], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_16.run(buf43, primals_35, buf39, buf44, buf45, 32768, grid=grid(32768), stream=stream0)
del buf43
del primals_35
# Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, primals_36, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=128, bias=None)
assert_size_stride(buf46, (4, 128, 16, 16), (32768, 256, 16, 1))
buf47 = reinterpret_tensor(buf11, (4, 256, 16, 16), (65536, 256, 16, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.cat]
triton_poi_fused_cat_17.run(buf46, primals_37, buf21, buf47, 262144, grid=grid(262144), stream=stream0)
del primals_37
# Topologically Sorted Source Nodes: [x_32], Original ATen: [aten.convolution]
buf48 = extern_kernels.convolution(buf47, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 64, 16, 16), (16384, 256, 16, 1))
buf49 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool)
buf50 = reinterpret_tensor(buf38, (4, 64, 16, 16), (16384, 256, 16, 1), 0); del buf38 # reuse
# Topologically Sorted Source Nodes: [x_32, x_33], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_18.run(buf48, primals_39, buf49, buf50, 65536, grid=grid(65536), stream=stream0)
del primals_39
# Topologically Sorted Source Nodes: [x_34], Original ATen: [aten.convolution]
buf51 = extern_kernels.convolution(buf50, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 64, 16, 16), (16384, 256, 16, 1))
buf52 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool)
buf53 = buf48; del buf48 # reuse
# Topologically Sorted Source Nodes: [x_34, iadd_6, x_35], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_19.run(buf51, primals_41, buf47, buf52, buf53, 65536, grid=grid(65536), stream=stream0)
del buf51
del primals_41
# Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.convolution]
buf54 = extern_kernels.convolution(buf53, primals_42, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=64, bias=None)
assert_size_stride(buf54, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf55 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.cat]
triton_poi_fused_cat_20.run(buf54, primals_43, buf13, buf55, 524288, grid=grid(524288), stream=stream0)
del buf54
del primals_43
# Topologically Sorted Source Nodes: [x_38], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf55, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf57 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool)
buf58 = reinterpret_tensor(buf46, (4, 32, 32, 32), (32768, 1024, 32, 1), 0); del buf46 # reuse
# Topologically Sorted Source Nodes: [x_38, x_39], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_21.run(buf56, primals_45, buf57, buf58, 131072, grid=grid(131072), stream=stream0)
del primals_45
# Topologically Sorted Source Nodes: [x_40], Original ATen: [aten.convolution]
buf59 = extern_kernels.convolution(buf58, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf60 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool)
buf61 = buf56; del buf56 # reuse
# Topologically Sorted Source Nodes: [x_40, iadd_7, x_41], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_22.run(buf59, primals_47, buf55, buf60, buf61, 131072, grid=grid(131072), stream=stream0)
del buf59
del primals_47
# Topologically Sorted Source Nodes: [x_42], Original ATen: [aten.convolution]
buf62 = extern_kernels.convolution(buf61, primals_48, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=32, bias=None)
assert_size_stride(buf62, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf63 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.cat]
triton_poi_fused_cat_23.run(buf62, primals_49, buf5, buf63, 1048576, grid=grid(1048576), stream=stream0)
del buf62
del primals_49
# Topologically Sorted Source Nodes: [x_44], Original ATen: [aten.convolution]
buf64 = extern_kernels.convolution(buf63, primals_50, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf65 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool)
buf66 = reinterpret_tensor(buf35, (4, 1, 64, 64), (4096, 4096, 64, 1), 0); del buf35 # reuse
# Topologically Sorted Source Nodes: [x_44, x_45], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_24.run(buf64, primals_51, buf65, buf66, 16384, grid=grid(16384), stream=stream0)
del primals_51
# Topologically Sorted Source Nodes: [x_46], Original ATen: [aten.convolution]
buf67 = extern_kernels.convolution(buf66, primals_52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf68 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool)
buf69 = buf64; del buf64 # reuse
# Topologically Sorted Source Nodes: [x_46, iadd_8, x_47], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_25.run(buf67, primals_53, buf63, buf68, buf69, 16384, grid=grid(16384), stream=stream0)
del buf67
del primals_53
return (buf69, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_50, primals_52, buf1, buf2, buf4, buf5, buf7, buf9, buf10, buf12, buf13, buf15, buf17, buf18, buf20, buf21, buf23, buf25, buf26, buf28, buf29, buf31, buf33, buf34, buf36, buf37, buf39, buf41, buf42, buf44, buf45, buf47, buf49, buf50, buf52, buf53, buf55, buf57, buf58, buf60, buf61, buf63, buf65, buf66, buf68, )
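# The tuple above is (output, *saved): buf69 is the network output, the reused
# primals are the weights autograd will need, and the torch.bool buffers
# (buf1, buf4, ...) are the stored LeakyReLU masks. A hedged sketch of how a
# caller might unpack it (assumed usage; mirrors benchmark_compiled_module):
def _run_once_sketch(primals):
    out, *saved_for_backward = call(list(primals))  # call() clears its arg list
    return out  # a (4, 1, 64, 64) float32 tensor on cuda:0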
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((128, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((256, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((256, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((128, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((64, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((64, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((32, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((32, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((1, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class ConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels, dropout=False,
                 norm='batch', residual=True, activation='leakyrelu',
                 transpose=False):
super(ConvBlock, self).__init__()
self.dropout = dropout
self.residual = residual
self.activation = activation
self.transpose = transpose
if self.dropout:
self.dropout1 = nn.Dropout2d(p=0.05)
self.dropout2 = nn.Dropout2d(p=0.05)
self.norm1 = None
self.norm2 = None
if norm == 'batch':
self.norm1 = nn.BatchNorm2d(out_channels)
self.norm2 = nn.BatchNorm2d(out_channels)
elif norm == 'instance':
self.norm1 = nn.InstanceNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
elif norm == 'mixed':
self.norm1 = nn.BatchNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
if self.transpose:
self.conv1 = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size=3, padding=1)
self.conv2 = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=3, padding=1)
else:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
padding=1)
            self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                padding=1)
if self.activation == 'relu':
self.actfun1 = nn.ReLU()
self.actfun2 = nn.ReLU()
elif self.activation == 'leakyrelu':
self.actfun1 = nn.LeakyReLU()
self.actfun2 = nn.LeakyReLU()
elif self.activation == 'elu':
self.actfun1 = nn.ELU()
self.actfun2 = nn.ELU()
elif self.activation == 'selu':
self.actfun1 = nn.SELU()
self.actfun2 = nn.SELU()
def forward(self, x):
ox = x
x = self.conv1(x)
if self.dropout:
x = self.dropout1(x)
if self.norm1:
x = self.norm1(x)
x = self.actfun1(x)
x = self.conv2(x)
if self.dropout:
x = self.dropout2(x)
if self.norm2:
x = self.norm2(x)
if self.residual:
            c = min(ox.shape[1], x.shape[1])
            x[:, :c, :, :] += ox[:, :c, :, :]
x = self.actfun2(x)
return x
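def _convblock_residual_sketch():
    # Illustrative only (assumed shapes): the in-place slice add in forward()
    # is what lowers to the aten.add(%convolution_k, %slice_n) nodes in the
    # compiled graph -- only the first min(in_channels, out_channels) channels
    # of the input are added back onto the conv output.
    blk = ConvBlock(128, 32, norm=None)
    x = torch.rand(4, 128, 32, 32)
    y = blk(x)              # (4, 32, 32, 32); x[:, :32] was added into y
    return y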
class Unet(nn.Module):
def __init__(self, n_channel_in=1, n_channel_out=1, residual=False,
down='conv', up='tconv', activation='selu'):
super(Unet, self).__init__()
self.residual = residual
if down == 'maxpool':
self.down1 = nn.MaxPool2d(kernel_size=2)
self.down2 = nn.MaxPool2d(kernel_size=2)
self.down3 = nn.MaxPool2d(kernel_size=2)
self.down4 = nn.MaxPool2d(kernel_size=2)
elif down == 'avgpool':
self.down1 = nn.AvgPool2d(kernel_size=2)
self.down2 = nn.AvgPool2d(kernel_size=2)
self.down3 = nn.AvgPool2d(kernel_size=2)
self.down4 = nn.AvgPool2d(kernel_size=2)
elif down == 'conv':
self.down1 = nn.Conv2d(32, 32, kernel_size=2, stride=2, groups=32)
self.down2 = nn.Conv2d(64, 64, kernel_size=2, stride=2, groups=64)
self.down3 = nn.Conv2d(128, 128, kernel_size=2, stride=2,
groups=128)
self.down4 = nn.Conv2d(256, 256, kernel_size=2, stride=2,
groups=256)
self.down1.weight.data = 0.01 * self.down1.weight.data + 0.25
self.down2.weight.data = 0.01 * self.down2.weight.data + 0.25
self.down3.weight.data = 0.01 * self.down3.weight.data + 0.25
self.down4.weight.data = 0.01 * self.down4.weight.data + 0.25
self.down1.bias.data = 0.01 * self.down1.bias.data + 0
self.down2.bias.data = 0.01 * self.down2.bias.data + 0
self.down3.bias.data = 0.01 * self.down3.bias.data + 0
self.down4.bias.data = 0.01 * self.down4.bias.data + 0
if up == 'bilinear' or up == 'nearest':
self.up1 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up2 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up3 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up4 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
elif up == 'tconv':
self.up1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2,
groups=256)
self.up2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2,
groups=128)
self.up3 = nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2,
groups=64)
self.up4 = nn.ConvTranspose2d(32, 32, kernel_size=2, stride=2,
groups=32)
self.up1.weight.data = 0.01 * self.up1.weight.data + 0.25
self.up2.weight.data = 0.01 * self.up2.weight.data + 0.25
self.up3.weight.data = 0.01 * self.up3.weight.data + 0.25
self.up4.weight.data = 0.01 * self.up4.weight.data + 0.25
self.up1.bias.data = 0.01 * self.up1.bias.data + 0
self.up2.bias.data = 0.01 * self.up2.bias.data + 0
self.up3.bias.data = 0.01 * self.up3.bias.data + 0
self.up4.bias.data = 0.01 * self.up4.bias.data + 0
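        # Note: residual and activation are passed positionally below, so they
        # land in ConvBlock's dropout and norm parameters; each block therefore
        # keeps ConvBlock's defaults (residual=True, LeakyReLU), which matches
        # the slope-0.01 LeakyReLU and slice-add kernels in the compiled code.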
self.conv1 = ConvBlock(n_channel_in, 32, residual, activation)
self.conv2 = ConvBlock(32, 64, residual, activation)
self.conv3 = ConvBlock(64, 128, residual, activation)
self.conv4 = ConvBlock(128, 256, residual, activation)
self.conv5 = ConvBlock(256, 256, residual, activation)
self.conv6 = ConvBlock(2 * 256, 128, residual, activation)
self.conv7 = ConvBlock(2 * 128, 64, residual, activation)
self.conv8 = ConvBlock(2 * 64, 32, residual, activation)
self.conv9 = ConvBlock(2 * 32, n_channel_out, residual, activation)
if self.residual:
self.convres = ConvBlock(n_channel_in, n_channel_out, residual,
activation)
def forward(self, x):
c0 = x
c1 = self.conv1(x)
x = self.down1(c1)
c2 = self.conv2(x)
x = self.down2(c2)
c3 = self.conv3(x)
x = self.down3(c3)
c4 = self.conv4(x)
x = self.down4(c4)
x = self.conv5(x)
x = self.up1(x)
x = torch.cat([x, c4], 1)
x = self.conv6(x)
x = self.up2(x)
x = torch.cat([x, c3], 1)
x = self.conv7(x)
x = self.up3(x)
x = torch.cat([x, c2], 1)
x = self.conv8(x)
x = self.up4(x)
x = torch.cat([x, c1], 1)
x = self.conv9(x)
if self.residual:
x = torch.add(x, self.convres(c0))
return x
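def _unet_shape_sketch():
    # Hedged usage sketch matching get_inputs/get_init_inputs below: a 64x64
    # single-channel batch goes through four stride-2 downsamples (to 4x4) and
    # four stride-2 transposed-conv upsamples back to 64x64, so the output
    # shape equals the input shape.
    model = Unet()
    out = model(torch.rand(4, 1, 64, 64))
    assert out.shape == (4, 1, 64, 64)
    return out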
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 32
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 131072
tmp21 = tl.load(in_ptr0 + x3, None)
tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
    tmp7 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp3, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0)
    tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
    tmp17 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp2, eviction_policy='evict_last', other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + x3, tmp27, None)
tl.store(out_ptr1 + x3, tmp30, None)
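# The masked branching above (tmp2 = x1 < 1, tmp3 = tmp2 & tmp2, and the
# duplicated loads) is tracing redundancy from ConvBlock's in-place slice add:
# only channel 0 overlaps the 1-channel input, so the residual applies there
# and every other channel reduces to plain conv + bias. A condensed eager
# sketch of the effective computation (illustrative assumption):
def _effective_sketch_kernel_1(conv_out, bias, inp):
    # conv_out: (N, 32, 64, 64); bias: (32,); inp: (N, 1, 64, 64)
    y = conv_out + bias.view(1, -1, 1, 1)
    y[:, :1] = y[:, :1] + inp                 # residual on the overlap only
    return y > 0, torch.nn.functional.leaky_relu(y, negative_slope=0.01)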
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_4(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 1024 % 64
x3 = xindex
x2 = xindex // 65536
x4 = xindex % 65536
tmp21 = tl.load(in_ptr0 + x3, None)
tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 32, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + 32768 * x2), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0)
    tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + 32768 * x2), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + x3, tmp27, None)
tl.store(out_ptr1 + x3, tmp30, None)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 128
x3 = xindex
x2 = xindex // 32768
x4 = xindex % 32768
tmp21 = tl.load(in_ptr0 + x3, None)
tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 64, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + 16384 * x2), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0)
tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0
)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + 16384 * x2), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + x3, tmp27, None)
tl.store(out_ptr1 + x3, tmp30, None)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_10(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 256
x3 = xindex
x2 = xindex // 16384
x4 = xindex % 16384
tmp21 = tl.load(in_ptr0 + x3, None)
tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 128, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + 8192 * x2), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0)
tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0
)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + 8192 * x2), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + x3, tmp27, None)
tl.store(out_ptr1 + x3, tmp30, None)
@triton.jit
def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_12(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_13(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 512
x0 = xindex % 64
x2 = xindex // 32768
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 512, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
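# triton_poi_fused_cat_14 materializes torch.cat([up + bias, skip], dim=1):
# channels 0-255 come from the transposed conv plus its bias, channels
# 256-511 are copied from the encoder skip tensor.  Eager sketch
# (illustrative name):
def _reference_bias_then_cat(up, bias, skip):
    import torch
    return torch.cat([up + bias.view(1, -1, 1, 1), skip], dim=1)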
@triton.jit
def triton_poi_fused_convolution_leaky_relu_15(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_16(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
x2 = xindex // 8192
x4 = xindex % 8192
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + 32768 * x2), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 256
x0 = xindex % 256
x2 = xindex // 65536
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_18(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_19(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
x2 = xindex // 16384
x4 = xindex % 16384
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + 65536 * x2), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 1024 % 128
x0 = xindex % 1024
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_21(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_22(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
x2 = xindex // 32768
x4 = xindex % 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + 131072 * x2), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 64
x0 = xindex % 4096
x2 = xindex // 262144
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_24(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 0.0
tmp5 = tmp3 > tmp4
tmp6 = 0.01
tmp7 = tmp3 * tmp6
tmp8 = tl.where(tmp5, tmp3, tmp7)
tl.store(out_ptr0 + x0, tmp5, None)
tl.store(out_ptr1 + x0, tmp8, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_25(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
x1 = xindex // 4096
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x0 + 262144 * x1), None)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 0.01
tmp9 = tmp5 * tmp8
tmp10 = tl.where(tmp7, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp7, None)
tl.store(out_ptr1 + x2, tmp10, None)
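# Kernels 5-25 above repeat the same epilogue patterns at each U-Net
# resolution: a plain bias add after a strided (grouped) down-convolution,
# a bias add followed by LeakyReLU, a bias add plus (partial) residual add
# followed by LeakyReLU, and the cat kernels that join each decoder tensor
# with its encoder skip connection.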
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52, primals_53
) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (128, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_19, (128,), (1,))
assert_size_stride(primals_20, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_21, (256,), (1,))
assert_size_stride(primals_22, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_23, (256,), (1,))
assert_size_stride(primals_24, (256, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_25, (256,), (1,))
assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_27, (256,), (1,))
assert_size_stride(primals_28, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_29, (256,), (1,))
assert_size_stride(primals_30, (256, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_31, (256,), (1,))
assert_size_stride(primals_32, (128, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_33, (128,), (1,))
assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_35, (128,), (1,))
assert_size_stride(primals_36, (128, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (64, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_39, (64,), (1,))
assert_size_stride(primals_40, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_41, (64,), (1,))
assert_size_stride(primals_42, (64, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_43, (64,), (1,))
assert_size_stride(primals_44, (32, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_45, (32,), (1,))
assert_size_stride(primals_46, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_47, (32,), (1,))
assert_size_stride(primals_48, (32, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_49, (32,), (1,))
assert_size_stride(primals_50, (1, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_51, (1,), (1,))
assert_size_stride(primals_52, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_53, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0,
primals_3, buf1, buf2, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf5 = buf0
del buf0
triton_poi_fused_add_convolution_leaky_relu_1[grid(524288)](buf3,
primals_5, primals_1, buf4, buf5, 524288, XBLOCK=1024,
num_warps=4, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=32, bias=None)
assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_2[grid(131072)](buf7, primals_7,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
buf10 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_3[grid(262144)](buf8,
primals_9, buf9, buf10, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_9
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf12 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
buf13 = buf8
del buf8
triton_poi_fused_add_convolution_leaky_relu_4[grid(262144)](buf11,
primals_11, buf7, buf12, buf13, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=64, bias=None)
assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_5[grid(65536)](buf15, primals_13,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_13
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 128, 16, 16), (32768, 256, 16, 1))
buf17 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_6[grid(131072)](buf16,
primals_15, buf17, buf18, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_15
buf19 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1))
buf20 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
buf21 = buf16
del buf16
triton_poi_fused_add_convolution_leaky_relu_7[grid(131072)](buf19,
primals_17, buf15, buf20, buf21, 131072, XBLOCK=512, num_warps=
8, num_stages=1)
del primals_17
buf22 = extern_kernels.convolution(buf21, primals_18, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=128, bias=None)
assert_size_stride(buf22, (4, 128, 8, 8), (8192, 64, 8, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_8[grid(32768)](buf23, primals_19,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1))
buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
triton_poi_fused_convolution_leaky_relu_9[grid(65536)](buf24,
primals_21, buf25, buf26, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_21
buf27 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 256, 8, 8), (16384, 64, 8, 1))
buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
buf29 = buf24
del buf24
triton_poi_fused_add_convolution_leaky_relu_10[grid(65536)](buf27,
primals_23, buf23, buf28, buf29, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf27
del primals_23
buf30 = extern_kernels.convolution(buf29, primals_24, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=256, bias=None)
assert_size_stride(buf30, (4, 256, 4, 4), (4096, 16, 4, 1))
buf31 = buf30
del buf30
triton_poi_fused_convolution_11[grid(16384)](buf31, primals_25,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 256, 4, 4), (4096, 16, 4, 1))
buf33 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
buf34 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_12[grid(16384)](buf32,
primals_27, buf33, buf34, 16384, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_27
buf35 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 256, 4, 4), (4096, 16, 4, 1))
buf36 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
buf37 = buf32
del buf32
triton_poi_fused_add_convolution_leaky_relu_13[grid(16384)](buf35,
primals_29, buf31, buf36, buf37, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_29
buf38 = extern_kernels.convolution(buf37, primals_30, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=256, bias=None)
assert_size_stride(buf38, (4, 256, 8, 8), (16384, 64, 8, 1))
buf39 = reinterpret_tensor(buf19, (4, 512, 8, 8), (32768, 64, 8, 1), 0)
del buf19
triton_poi_fused_cat_14[grid(131072)](buf38, primals_31, buf29,
buf39, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_31
buf40 = extern_kernels.convolution(buf39, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 128, 8, 8), (8192, 64, 8, 1))
buf41 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool
)
buf42 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_15[grid(32768)](buf40,
primals_33, buf41, buf42, 32768, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_33
buf43 = extern_kernels.convolution(buf42, primals_34, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1))
buf44 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool
)
buf45 = buf40
del buf40
triton_poi_fused_add_convolution_leaky_relu_16[grid(32768)](buf43,
primals_35, buf39, buf44, buf45, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del buf43
del primals_35
buf46 = extern_kernels.convolution(buf45, primals_36, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=128, bias=None)
assert_size_stride(buf46, (4, 128, 16, 16), (32768, 256, 16, 1))
buf47 = reinterpret_tensor(buf11, (4, 256, 16, 16), (65536, 256, 16,
1), 0)
del buf11
triton_poi_fused_cat_17[grid(262144)](buf46, primals_37, buf21,
buf47, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_37
buf48 = extern_kernels.convolution(buf47, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 64, 16, 16), (16384, 256, 16, 1))
buf49 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
buf50 = reinterpret_tensor(buf38, (4, 64, 16, 16), (16384, 256, 16,
1), 0)
del buf38
triton_poi_fused_convolution_leaky_relu_18[grid(65536)](buf48,
primals_39, buf49, buf50, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_39
buf51 = extern_kernels.convolution(buf50, primals_40, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 64, 16, 16), (16384, 256, 16, 1))
buf52 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
buf53 = buf48
del buf48
triton_poi_fused_add_convolution_leaky_relu_19[grid(65536)](buf51,
primals_41, buf47, buf52, buf53, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf51
del primals_41
buf54 = extern_kernels.convolution(buf53, primals_42, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=64, bias=None)
assert_size_stride(buf54, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf55 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024,
32, 1), 0)
del buf3
triton_poi_fused_cat_20[grid(524288)](buf54, primals_43, buf13,
buf55, 524288, XBLOCK=512, num_warps=8, num_stages=1)
del buf54
del primals_43
buf56 = extern_kernels.convolution(buf55, primals_44, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf57 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.bool)
buf58 = reinterpret_tensor(buf46, (4, 32, 32, 32), (32768, 1024, 32,
1), 0)
del buf46
triton_poi_fused_convolution_leaky_relu_21[grid(131072)](buf56,
primals_45, buf57, buf58, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_45
buf59 = extern_kernels.convolution(buf58, primals_46, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf60 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.bool)
buf61 = buf56
del buf56
triton_poi_fused_add_convolution_leaky_relu_22[grid(131072)](buf59,
primals_47, buf55, buf60, buf61, 131072, XBLOCK=512, num_warps=
8, num_stages=1)
del buf59
del primals_47
buf62 = extern_kernels.convolution(buf61, primals_48, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=32, bias=None)
assert_size_stride(buf62, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf63 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_23[grid(1048576)](buf62, primals_49, buf5,
buf63, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del buf62
del primals_49
buf64 = extern_kernels.convolution(buf63, primals_50, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf65 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.bool)
buf66 = reinterpret_tensor(buf35, (4, 1, 64, 64), (4096, 4096, 64,
1), 0)
del buf35
triton_poi_fused_convolution_leaky_relu_24[grid(16384)](buf64,
primals_51, buf65, buf66, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_51
buf67 = extern_kernels.convolution(buf66, primals_52, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf68 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.bool)
buf69 = buf64
del buf64
triton_poi_fused_add_convolution_leaky_relu_25[grid(16384)](buf67,
primals_53, buf63, buf68, buf69, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf67
del primals_53
return (buf69, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, primals_34, primals_36, primals_38,
primals_40, primals_42, primals_44, primals_46, primals_48,
primals_50, primals_52, buf1, buf2, buf4, buf5, buf7, buf9, buf10,
buf12, buf13, buf15, buf17, buf18, buf20, buf21, buf23, buf25,
buf26, buf28, buf29, buf31, buf33, buf34, buf36, buf37, buf39,
buf41, buf42, buf44, buf45, buf47, buf49, buf50, buf52, buf53,
buf55, buf57, buf58, buf60, buf61, buf63, buf65, buf66, buf68)
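# call() returns the network output (buf69) followed by the tensors the
# autograd backward will reuse: the conv weights, the input, and the boolean
# LeakyReLU masks and intermediate activations saved by the fused kernels.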
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, dropout=False, norm=
'batch', residual=True, activation='leakyrelu', transpose=False):
super(ConvBlock, self).__init__()
self.dropout = dropout
self.residual = residual
self.activation = activation
self.transpose = transpose
if self.dropout:
self.dropout1 = nn.Dropout2d(p=0.05)
self.dropout2 = nn.Dropout2d(p=0.05)
self.norm1 = None
self.norm2 = None
if norm == 'batch':
self.norm1 = nn.BatchNorm2d(out_channels)
self.norm2 = nn.BatchNorm2d(out_channels)
elif norm == 'instance':
self.norm1 = nn.InstanceNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
elif norm == 'mixed':
self.norm1 = nn.BatchNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
if self.transpose:
self.conv1 = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size=3, padding=1)
self.conv2 = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=3, padding=1)
else:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=
3, padding=1)
if self.activation == 'relu':
self.actfun1 = nn.ReLU()
self.actfun2 = nn.ReLU()
elif self.activation == 'leakyrelu':
self.actfun1 = nn.LeakyReLU()
self.actfun2 = nn.LeakyReLU()
elif self.activation == 'elu':
self.actfun1 = nn.ELU()
self.actfun2 = nn.ELU()
elif self.activation == 'selu':
self.actfun1 = nn.SELU()
self.actfun2 = nn.SELU()
def forward(self, x):
ox = x
x = self.conv1(x)
if self.dropout:
x = self.dropout1(x)
if self.norm1:
x = self.norm1(x)
x = self.actfun1(x)
x = self.conv2(x)
if self.dropout:
x = self.dropout2(x)
if self.norm2:
x = self.norm2(x)
if self.residual:
x[:, 0:min(ox.shape[1], x.shape[1]), :, :] += ox[:, 0:min(ox.
shape[1], x.shape[1]), :, :]
x = self.actfun2(x)
return x
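# A hedged usage sketch of ConvBlock (arguments illustrative): with
# residual=True and matching channel counts the block computes
# actfun2(conv2(actfun1(conv1(x))) + x), with optional norm/dropout in
# between; when channel counts differ, only the leading min(in, out)
# channels receive the skip.
def _example_convblock():
    import torch
    blk = ConvBlock(32, 32, norm=None, residual=True, activation='leakyrelu')
    x = torch.randn(1, 32, 64, 64)
    return blk(x).shape  # torch.Size([1, 32, 64, 64])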
class UnetNew(nn.Module):
def __init__(self, n_channel_in=1, n_channel_out=1, residual=False,
down='conv', up='tconv', activation='selu'):
super(UnetNew, self).__init__()
self.residual = residual
if down == 'maxpool':
self.down1 = nn.MaxPool2d(kernel_size=2)
self.down2 = nn.MaxPool2d(kernel_size=2)
self.down3 = nn.MaxPool2d(kernel_size=2)
self.down4 = nn.MaxPool2d(kernel_size=2)
elif down == 'avgpool':
self.down1 = nn.AvgPool2d(kernel_size=2)
self.down2 = nn.AvgPool2d(kernel_size=2)
self.down3 = nn.AvgPool2d(kernel_size=2)
self.down4 = nn.AvgPool2d(kernel_size=2)
elif down == 'conv':
self.down1 = nn.Conv2d(32, 32, kernel_size=2, stride=2, groups=32)
self.down2 = nn.Conv2d(64, 64, kernel_size=2, stride=2, groups=64)
self.down3 = nn.Conv2d(128, 128, kernel_size=2, stride=2,
groups=128)
self.down4 = nn.Conv2d(256, 256, kernel_size=2, stride=2,
groups=256)
self.down1.weight.data = 0.01 * self.down1.weight.data + 0.25
self.down2.weight.data = 0.01 * self.down2.weight.data + 0.25
self.down3.weight.data = 0.01 * self.down3.weight.data + 0.25
self.down4.weight.data = 0.01 * self.down4.weight.data + 0.25
self.down1.bias.data = 0.01 * self.down1.bias.data + 0
self.down2.bias.data = 0.01 * self.down2.bias.data + 0
self.down3.bias.data = 0.01 * self.down3.bias.data + 0
self.down4.bias.data = 0.01 * self.down4.bias.data + 0
if up == 'bilinear' or up == 'nearest':
self.up1 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up2 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up3 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up4 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
elif up == 'tconv':
self.up1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2,
groups=256)
self.up2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2,
groups=128)
self.up3 = nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2,
groups=64)
self.up4 = nn.ConvTranspose2d(32, 32, kernel_size=2, stride=2,
groups=32)
self.up1.weight.data = 0.01 * self.up1.weight.data + 0.25
self.up2.weight.data = 0.01 * self.up2.weight.data + 0.25
self.up3.weight.data = 0.01 * self.up3.weight.data + 0.25
self.up4.weight.data = 0.01 * self.up4.weight.data + 0.25
self.up1.bias.data = 0.01 * self.up1.bias.data + 0
self.up2.bias.data = 0.01 * self.up2.bias.data + 0
self.up3.bias.data = 0.01 * self.up3.bias.data + 0
self.up4.bias.data = 0.01 * self.up4.bias.data + 0
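        # Note: the ConvBlock calls below pass `residual` and `activation`
        # positionally, so they land on ConvBlock's `dropout` and `norm`
        # parameters; the blocks therefore run with their defaults
        # (residual=True, LeakyReLU), which is what the fused kernels above
        # implement.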
self.conv1 = ConvBlock(n_channel_in, 32, residual, activation)
self.conv2 = ConvBlock(32, 64, residual, activation)
self.conv3 = ConvBlock(64, 128, residual, activation)
self.conv4 = ConvBlock(128, 256, residual, activation)
self.conv5 = ConvBlock(256, 256, residual, activation)
self.conv6 = ConvBlock(2 * 256, 128, residual, activation)
self.conv7 = ConvBlock(2 * 128, 64, residual, activation)
self.conv8 = ConvBlock(2 * 64, 32, residual, activation)
self.conv9 = ConvBlock(2 * 32, n_channel_out, residual, activation)
if self.residual:
self.convres = ConvBlock(n_channel_in, n_channel_out, residual,
activation)
def forward(self, input_0):
        # Parameter-to-primal mapping; the bias slots follow the epilogue
        # kernels in call() (each odd-numbered primal is the bias of the
        # conv whose weight is the preceding even-numbered primal).
        primals_6 = self.down1.weight
        primals_7 = self.down1.bias
        primals_12 = self.down2.weight
        primals_13 = self.down2.bias
        primals_18 = self.down3.weight
        primals_19 = self.down3.bias
        primals_24 = self.down4.weight
        primals_25 = self.down4.bias
        primals_30 = self.up1.weight
        primals_31 = self.up1.bias
        primals_36 = self.up2.weight
        primals_37 = self.up2.bias
        primals_42 = self.up3.weight
        primals_43 = self.up3.bias
        primals_48 = self.up4.weight
        primals_49 = self.up4.bias
        primals_2 = self.conv1.conv1.weight
        primals_3 = self.conv1.conv1.bias
        primals_4 = self.conv1.conv2.weight
        primals_5 = self.conv1.conv2.bias
        primals_8 = self.conv2.conv1.weight
        primals_9 = self.conv2.conv1.bias
        primals_10 = self.conv2.conv2.weight
        primals_11 = self.conv2.conv2.bias
        primals_14 = self.conv3.conv1.weight
        primals_15 = self.conv3.conv1.bias
        primals_16 = self.conv3.conv2.weight
        primals_17 = self.conv3.conv2.bias
        primals_20 = self.conv4.conv1.weight
        primals_21 = self.conv4.conv1.bias
        primals_22 = self.conv4.conv2.weight
        primals_23 = self.conv4.conv2.bias
        primals_26 = self.conv5.conv1.weight
        primals_27 = self.conv5.conv1.bias
        primals_28 = self.conv5.conv2.weight
        primals_29 = self.conv5.conv2.bias
        primals_32 = self.conv6.conv1.weight
        primals_33 = self.conv6.conv1.bias
        primals_34 = self.conv6.conv2.weight
        primals_35 = self.conv6.conv2.bias
        primals_38 = self.conv7.conv1.weight
        primals_39 = self.conv7.conv1.bias
        primals_40 = self.conv7.conv2.weight
        primals_41 = self.conv7.conv2.bias
        primals_44 = self.conv8.conv1.weight
        primals_45 = self.conv8.conv1.bias
        primals_46 = self.conv8.conv2.weight
        primals_47 = self.conv8.conv2.bias
        primals_50 = self.conv9.conv1.weight
        primals_51 = self.conv9.conv1.bias
        primals_52 = self.conv9.conv2.weight
        primals_53 = self.conv9.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53])
return output[0]
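# A hedged smoke-test sketch for the compiled module (CUDA required, since
# call() pins device 0; the input shape follows the asserts in call):
def _example_unet_forward():
    import torch
    net = UnetNew(n_channel_in=1, n_channel_out=1).cuda()
    x = torch.randn(4, 1, 64, 64, device='cuda')
    return net(x).shape  # torch.Size([4, 1, 64, 64])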
|
tim-vdl/noise2self
|
Unet
| false | 11,056 |
[
"MIT"
] | 0 |
2cf10d20d988dc7b6c1278150f170aa3e3335b28
|
https://github.com/tim-vdl/noise2self/tree/2cf10d20d988dc7b6c1278150f170aa3e3335b28
|
MSELoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/7p/c7po76azmxaenhqdhxmjkwmmyrh5jpr4p5zydgfurvfy3q5qm6gp.py
# Topologically Sorted Source Nodes: [mse_loss, loss_cls], Original ATen: [aten.mse_loss, aten.mul]
# Source node to ATen node mapping:
# loss_cls => mul
# mse_loss => mean, pow_1, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_mse_loss_mul_0 = async_compile.triton('triton_per_fused_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mse_loss, loss_cls], Original ATen: [aten.mse_loss, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mse_loss_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class MSELoss(nn.Module):
"""MSE loss.
Args:
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". Defaults to 'mean'.
loss_weight (float): Weight of the loss. Defaults to 1.0.
"""
def __init__(self, reduction='mean', loss_weight=1.0, loss_name='mse_loss'
):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
self.loss_name = loss_name
self.cls_criterion = nn.MSELoss(reduction=reduction)
def get_name(self):
return 'MSE_loss'
def forward(self, cls_score, reg_label, **kwargs):
loss_cls = self.loss_weight * self.cls_criterion(cls_score, reg_label)
return loss_cls
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
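# A hedged usage sketch (values illustrative): with reduction='mean' this is
# loss_weight * torch.nn.functional.mse_loss(cls_score, reg_label).
def _example_mse_loss():
    import torch
    crit = MSELoss(loss_weight=0.5)
    pred, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    return crit(pred, target)  # scalar tensor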
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
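# The persistent-reduction kernel above computes mean((a - b) ** 2) over all
# 256 elements in a single program, then multiplies by the loss weight (1.0
# here).  An equivalent eager-mode check (illustrative name):
def _reference_mse_mul(a, b, loss_weight=1.0):
    return ((a - b) ** 2).mean() * loss_weight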
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MSELossNew(nn.Module):
"""MSE loss.
Args:
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". Defaults to 'mean'.
loss_weight (float): Weight of the loss. Defaults to 1.0.
"""
def __init__(self, reduction='mean', loss_weight=1.0, loss_name='mse_loss'
):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
self.loss_name = loss_name
self.cls_criterion = nn.MSELoss(reduction=reduction)
def get_name(self):
return 'MSE_loss'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
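# Note: the compiled forward above dispatches straight to the Triton kernel,
# so the nn.MSELoss instance kept in self.cls_criterion is not used by this
# forward.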
|
youqingxiaozhua/ABAW3
|
MSELoss
| false | 11,057 |
[
"Apache-2.0"
] | 0 |
51ab58ab311ecd6603a8485a45af0dcc39880e69
|
https://github.com/youqingxiaozhua/ABAW3/tree/51ab58ab311ecd6603a8485a45af0dcc39880e69
|
Net1
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/oc/cocyslomdha4xvngb5snlicdft7gehmc6r3h63wx6pgk4zxtwt2d.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.mul]
# Source node to ATen node mapping:
# x => convolution
# x_1 => mul
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [3, 3], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %convolution), kwargs = {})
triton_poi_fused_convolution_mul_0 = async_compile.triton('triton_poi_fused_convolution_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 400) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mn/cmnl7eldl5izyd3tela7ztxsynzr6a3jl6zko4hudla466nyjxbq.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# x_4 => mul_1
# Graph fragment:
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm, %addmm), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 7, 7), (49, 49, 7, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 256), (256, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (10, 64), (64, 1))
assert_size_stride(primals_7, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(3, 3), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 20, 20), (1600, 400, 20, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 20, 20), (1600, 400, 20, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_mul_0.run(buf1, primals_2, buf2, 6400, grid=grid(6400), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((25, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (25, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 64), (1, 256), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((25, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf3, buf4, 1600, grid=grid(1600), stream=stream0)
buf5 = empty_strided_cuda((25, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf5)
del primals_7
return (buf5, primals_1, primals_3, buf1, reinterpret_tensor(buf2, (25, 256), (256, 1), 0), buf3, buf4, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 7, 7), (49, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((10, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
def square(x):
return x * x
class Net1(torch.nn.Module):
def __init__(self, hidden=64, output=10):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 4, kernel_size=7, padding=0, stride=3)
self.fc1 = torch.nn.Linear(256, hidden)
self.fc2 = torch.nn.Linear(hidden, output)
def forward(self, x):
x = self.conv1(x)
x = square(x)
x = x.view(-1, 256)
x = self.fc1(x)
x = square(x)
x = self.fc2(x)
return x
def mid_layer(self):
return ['o1', 'o1a', 'o2', 'o2a', 'o3']
def forward_analyze(self, x):
o1 = self.conv1(x)
o1a = square(o1)
        o1a = o1a.view(-1, 256)
o2 = self.fc1(o1a)
o2a = square(o2)
o3 = self.fc2(o2a)
return o1, o1a, o2, o2a, o3
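# square(x) = x * x is a polynomial activation, so the whole forward pass
# consists of additions and multiplications only.  A minimal usage sketch
# (shapes follow get_inputs below); conv1 maps 64x64 inputs to 20x20, so the
# flattened batch dimension becomes 25 for a batch of 4:
def _example_net1():
    import torch
    net = Net1()
    x = torch.rand(4, 1, 64, 64)
    return net(x).shape  # torch.Size([25, 10])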
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_mul_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 400 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 7, 7), (49, 49, 7, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 256), (256, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (10, 64), (64, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(3,
3), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 20, 20), (1600, 400, 20, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 20, 20), (1600, 400, 20, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_0[grid(6400)](buf1, primals_2,
buf2, 6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((25, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (25, 256),
(256, 1), 0), reinterpret_tensor(primals_4, (256, 64), (1, 256),
0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((25, 64), (64, 1), torch.float32)
triton_poi_fused_mul_1[grid(1600)](buf3, buf4, 1600, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((25, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6,
(64, 10), (1, 64), 0), alpha=1, beta=1, out=buf5)
del primals_7
return buf5, primals_1, primals_3, buf1, reinterpret_tensor(buf2, (25,
256), (256, 1), 0), buf3, buf4, primals_6, primals_4
def square(x):
return x * x
class Net1New(torch.nn.Module):
def __init__(self, hidden=64, output=10):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 4, kernel_size=7, padding=0, stride=3)
self.fc1 = torch.nn.Linear(256, hidden)
self.fc2 = torch.nn.Linear(hidden, output)
def mid_layer(self):
return ['o1', 'o1a', 'o2', 'o2a', 'o3']
def forward_analyze(self, x):
o1 = self.conv1(x)
o1a = square(o1)
        o1a = o1a.view(-1, 256)
o2 = self.fc1(o1a)
o2a = square(o2)
o3 = self.fc2(o2a)
return o1, o1a, o2, o2a, o3
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
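# --- Added illustration (not part of the original repo): a hedged sanity
# check of the compiled path. A CUDA device is assumed, since call() above
# launches Triton kernels on cuda:0. The eager recomputation below uses
# Net1New's own layers and should agree with call() up to float tolerance.
if __name__ == "__main__" and torch.cuda.is_available():
    net = Net1New().cuda()
    x = torch.rand(4, 1, 64, 64, device="cuda")
    compiled = net(x)
    eager = net.fc2(square(net.fc1(square(net.conv1(x)).view(-1, 256))))
    print(torch.allclose(compiled, eager, atol=1e-4))  # expected: True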
|
yxtj/henn
|
Net1
| false | 11,058 |
[
"MIT"
] | 0 |
5093f3e637ba0bb3e48c4f890b3b469c3617f2c5
|
https://github.com/yxtj/henn/tree/5093f3e637ba0bb3e48c4f890b3b469c3617f2c5
|
AsymmetricLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ga/cga4e74tld64owyz36xxkvzsnmfuphjsiuzgznmo7gpta3vrpnf7.py
# Topologically Sorted Source Nodes: [pred_sigmoid, sub, add, clamp, sub_1, mul, mul_1, pt, clamp_1, log, neg, sub_2, mul_2, sub_3, mul_3, add_2, asymmetric_weight, loss, loss_1, loss_cls], Original ATen: [aten.sigmoid, aten.rsub, aten.add, aten.clamp, aten.mul, aten.log, aten.neg, aten.pow, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_2 => add_2
# asymmetric_weight => pow_1
# clamp => clamp_max
# clamp_1 => clamp_min
# log => log
# loss => mul_4
# loss_1 => mean
# loss_cls => mul_5
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# neg => neg
# pred_sigmoid => sigmoid
# pt => add_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg1_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 0.05), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add, 1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, %sub_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %arg0_1), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_1, 1e-08), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 4.0), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%sub_2, %add_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %pow_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_4,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp7 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = 0.05
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.minimum(tmp5, tmp2)
tmp8 = tmp2 - tmp7
tmp9 = tmp6 * tmp8
tmp10 = tmp1 * tmp7
tmp11 = tmp9 + tmp10
tmp12 = 1e-08
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tl_math.log(tmp13)
tmp15 = -tmp14
tmp16 = tmp2 - tmp11
tmp17 = 0.0
tmp18 = tmp7 * tmp17
tmp19 = 4.0
tmp20 = tmp8 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = libdevice.pow(tmp16, tmp21)
tmp23 = tmp15 * tmp22
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp2
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp29, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [pred_sigmoid, sub, add, clamp, sub_1, mul, mul_1, pt, clamp_1, log, neg, sub_2, mul_2, sub_3, mul_3, add_2, asymmetric_weight, loss, loss_1, loss_cls], Original ATen: [aten.sigmoid, aten.rsub, aten.add, aten.clamp, aten.mul, aten.log, aten.neg, aten.pow, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sigmoid_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
    Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
        raise ValueError('avg_factor cannot be used with reduction="sum"')
return loss
def asymmetric_loss(pred, target, weight=None, gamma_pos=1.0, gamma_neg=4.0,
clip=0.05, reduction='mean', avg_factor=None, use_sigmoid=True, eps=1e-08):
"""asymmetric loss.
Please refer to the `paper <https://arxiv.org/abs/2009.14119>`__ for
details.
Args:
pred (torch.Tensor): The prediction with shape (N, \\*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \\*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, ). Defaults to None.
        gamma_pos (float): Positive focusing parameter. Defaults to 1.0.
gamma_neg (float): Negative focusing parameter. We usually set
gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss.
            Options are "none", "mean" and "sum". If reduction is 'none', loss
            has the same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
use_sigmoid (bool): Whether the prediction uses sigmoid instead
of softmax. Defaults to True.
eps (float): The minimum value of the argument of logarithm. Defaults
to 1e-8.
Returns:
torch.Tensor: Loss.
"""
    assert pred.shape == target.shape, 'pred and target should have the same shape.'
if use_sigmoid:
pred_sigmoid = pred.sigmoid()
else:
pred_sigmoid = nn.functional.softmax(pred, dim=-1)
target = target.type_as(pred)
if clip and clip > 0:
pt = (1 - pred_sigmoid + clip).clamp(max=1) * (1 - target
) + pred_sigmoid * target
else:
pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target
asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg * (1 -
target))
loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
def convert_to_one_hot(targets: 'torch.Tensor', classes) -> torch.Tensor:
"""This function converts target class indices to one-hot vectors, given
the number of classes.
Args:
targets (Tensor): The ground truth label of the prediction
with shape (N, 1)
classes (int): the number of classes.
Returns:
        Tensor: One-hot encoded targets with shape (N, classes).
"""
assert torch.max(targets).item(
) < classes, 'Class Index must be less than number of classes'
one_hot_targets = torch.zeros((targets.shape[0], classes), dtype=torch.
long, device=targets.device)
one_hot_targets.scatter_(1, targets.long(), 1)
return one_hot_targets
class AsymmetricLoss(nn.Module):
"""asymmetric loss.
Args:
gamma_pos (float): positive focusing parameter.
Defaults to 0.0.
gamma_neg (float): Negative focusing parameter. We
usually set gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss into
a scalar.
loss_weight (float): Weight of loss. Defaults to 1.0.
use_sigmoid (bool): Whether the prediction uses sigmoid instead
of softmax. Defaults to True.
eps (float): The minimum value of the argument of logarithm. Defaults
to 1e-8.
"""
def __init__(self, gamma_pos=0.0, gamma_neg=4.0, clip=0.05, reduction=
'mean', loss_weight=1.0, use_sigmoid=True, eps=1e-08):
super(AsymmetricLoss, self).__init__()
self.gamma_pos = gamma_pos
self.gamma_neg = gamma_neg
self.clip = clip
self.reduction = reduction
self.loss_weight = loss_weight
self.use_sigmoid = use_sigmoid
self.eps = eps
def forward(self, pred, target, weight=None, avg_factor=None,
reduction_override=None):
"""asymmetric loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \\*).
target (torch.Tensor): The ground truth label of the prediction
with shape (N, \\*), N or (N,1).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, \\*). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss into a scalar. Options are "none", "mean" and "sum".
Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
if target.dim() == 1 or target.dim() == 2 and target.shape[1] == 1:
target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1])
loss_cls = self.loss_weight * asymmetric_loss(pred, target, weight,
gamma_pos=self.gamma_pos, gamma_neg=self.gamma_neg, clip=self.
clip, reduction=reduction, avg_factor=avg_factor, use_sigmoid=
self.use_sigmoid, eps=self.eps)
return loss_cls
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
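# --- Added illustration (not part of the original repo): a hedged usage
# sketch for AsymmetricLoss, assuming only the definitions above; runs on
# CPU and shows the three reduction modes on multi-label targets.
if __name__ == "__main__":
    criterion = AsymmetricLoss(gamma_pos=0.0, gamma_neg=4.0, clip=0.05)
    pred = torch.randn(8, 5)                      # raw logits
    target = torch.randint(0, 2, (8, 5)).float()  # multi-label 0/1 targets
    print(criterion(pred, target))                                   # scalar mean
    print(criterion(pred, target, reduction_override="sum"))         # scalar sum
    print(criterion(pred, target, reduction_override="none").shape)  # (8, 5)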
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sigmoid_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp7 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = 0.05
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.minimum(tmp5, tmp2)
tmp8 = tmp2 - tmp7
tmp9 = tmp6 * tmp8
tmp10 = tmp1 * tmp7
tmp11 = tmp9 + tmp10
tmp12 = 1e-08
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tl_math.log(tmp13)
tmp15 = -tmp14
tmp16 = tmp2 - tmp11
tmp17 = 0.0
tmp18 = tmp7 * tmp17
tmp19 = 4.0
tmp20 = tmp8 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = libdevice.pow(tmp16, tmp21)
tmp23 = tmp15 * tmp22
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp2
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sigmoid_0[grid(1)
](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
    Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
        raise ValueError('avg_factor cannot be used with reduction="sum"')
return loss
def asymmetric_loss(pred, target, weight=None, gamma_pos=1.0, gamma_neg=4.0,
clip=0.05, reduction='mean', avg_factor=None, use_sigmoid=True, eps=1e-08):
"""asymmetric loss.
Please refer to the `paper <https://arxiv.org/abs/2009.14119>`__ for
details.
Args:
pred (torch.Tensor): The prediction with shape (N, \\*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \\*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, ). Defaults to None.
        gamma_pos (float): Positive focusing parameter. Defaults to 1.0.
gamma_neg (float): Negative focusing parameter. We usually set
gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss.
            Options are "none", "mean" and "sum". If reduction is 'none', loss
            has the same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
use_sigmoid (bool): Whether the prediction uses sigmoid instead
of softmax. Defaults to True.
eps (float): The minimum value of the argument of logarithm. Defaults
to 1e-8.
Returns:
torch.Tensor: Loss.
"""
    assert pred.shape == target.shape, 'pred and target should have the same shape.'
if use_sigmoid:
pred_sigmoid = pred.sigmoid()
else:
pred_sigmoid = nn.functional.softmax(pred, dim=-1)
target = target.type_as(pred)
if clip and clip > 0:
pt = (1 - pred_sigmoid + clip).clamp(max=1) * (1 - target
) + pred_sigmoid * target
else:
pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target
asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg * (1 -
target))
loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
def convert_to_one_hot(targets: 'torch.Tensor', classes) -> torch.Tensor:
"""This function converts target class indices to one-hot vectors, given
the number of classes.
Args:
targets (Tensor): The ground truth label of the prediction
with shape (N, 1)
classes (int): the number of classes.
Returns:
        Tensor: One-hot encoded targets with shape (N, classes).
"""
assert torch.max(targets).item(
) < classes, 'Class Index must be less than number of classes'
one_hot_targets = torch.zeros((targets.shape[0], classes), dtype=torch.
long, device=targets.device)
one_hot_targets.scatter_(1, targets.long(), 1)
return one_hot_targets
class AsymmetricLossNew(nn.Module):
"""asymmetric loss.
Args:
gamma_pos (float): positive focusing parameter.
Defaults to 0.0.
gamma_neg (float): Negative focusing parameter. We
usually set gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss into
a scalar.
loss_weight (float): Weight of loss. Defaults to 1.0.
use_sigmoid (bool): Whether the prediction uses sigmoid instead
of softmax. Defaults to True.
eps (float): The minimum value of the argument of logarithm. Defaults
to 1e-8.
"""
def __init__(self, gamma_pos=0.0, gamma_neg=4.0, clip=0.05, reduction=
'mean', loss_weight=1.0, use_sigmoid=True, eps=1e-08):
super(AsymmetricLossNew, self).__init__()
self.gamma_pos = gamma_pos
self.gamma_neg = gamma_neg
self.clip = clip
self.reduction = reduction
self.loss_weight = loss_weight
self.use_sigmoid = use_sigmoid
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
youqingxiaozhua/ABAW3
|
AsymmetricLoss
| false | 11,059 |
[
"Apache-2.0"
] | 0 |
51ab58ab311ecd6603a8485a45af0dcc39880e69
|
https://github.com/youqingxiaozhua/ABAW3/tree/51ab58ab311ecd6603a8485a45af0dcc39880e69
|
Net2
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/rq/crq32svgxankwdjlqan4a6sku5kvy6sqmxbrztiyacfcdwtx3pju.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.mul]
# Source node to ATen node mapping:
# x => convolution
# x_1 => mul
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %convolution), kwargs = {})
triton_poi_fused_convolution_mul_0 = async_compile.triton('triton_poi_fused_convolution_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 28800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 900) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/4s/c4sitfkjm2im3dpidrguuhih3parhqz425g4rohdwk5sk2lefgax.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.mul]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => mul_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, %convolution_1), kwargs = {})
triton_poi_fused_convolution_mul_1 = async_compile.triton('triton_poi_fused_convolution_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 196) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/sf/csfno6u7s7ukc5al2orjmjakr4c3bt4qennqvt76xfyrcko6ioac.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.mul]
# Source node to ATen node mapping:
# x_4 => convolution_2
# x_5 => mul_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, %convolution_2), kwargs = {})
triton_poi_fused_convolution_mul_2 = async_compile.triton('triton_poi_fused_convolution_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 144) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (8, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (4, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (10, 36), (36, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 30, 30), (7200, 900, 30, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 8, 30, 30), (7200, 900, 30, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_mul_0.run(buf1, primals_2, buf2, 28800, grid=grid(28800), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 64, 14, 14), (12544, 196, 14, 1))
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.mul]
triton_poi_fused_convolution_mul_1.run(buf4, primals_5, buf5, 50176, grid=grid(50176), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 12, 12), (576, 144, 12, 1))
buf7 = buf6; del buf6 # reuse
buf8 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.mul]
triton_poi_fused_convolution_mul_2.run(buf7, primals_7, buf8, 2304, grid=grid(2304), stream=stream0)
del primals_7
buf9 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf8, (64, 36), (36, 1), 0), reinterpret_tensor(primals_8, (36, 10), (1, 36), 0), alpha=1, beta=1, out=buf9)
del primals_9
return (buf9, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf4, buf5, buf7, reinterpret_tensor(buf8, (64, 36), (36, 1), 0), primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 36), (36, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
def square(x):
return x * x
class Net2(torch.nn.Module):
def __init__(self, act=square, output=10):
super().__init__()
self.act = act
self.conv1 = torch.nn.Conv2d(1, 8, kernel_size=5, stride=2, padding=0)
self.conv2 = torch.nn.Conv2d(8, 64, kernel_size=3, stride=2, padding=0)
self.conv3 = torch.nn.Conv2d(64, 4, kernel_size=3, stride=1, padding=0)
self.fc = torch.nn.Linear(4 * 3 * 3, output)
def forward(self, x):
x = self.conv1(x)
x = self.act(x)
x = self.conv2(x)
x = self.act(x)
x = self.conv3(x)
x = self.act(x)
x = x.view(-1, 36)
x = self.fc(x)
return x
def mid_layer(self):
return ['o1', 'o1a', 'o2', 'o2a', 'o3', 'o3a', 'o4']
def forward_analyze(self, x):
o1 = self.conv1(x)
o1a = self.act(o1)
o2 = self.conv2(o1a)
o2a = self.act(o2)
o3 = self.conv3(o2a)
o3a = self.act(o3)
        o3a = o3a.view(-1, 36)
o4 = self.fc(o3a)
return o1, o1a, o2, o2a, o3, o3a, o4
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
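# --- Added illustration (not part of the original repo): a hedged sketch
# tracing Net2's shapes, assuming only the Net2 class above. For a
# (4, 1, 64, 64) input:
#   conv1 (k5, s2): (64 - 5) // 2 + 1 = 30 -> (4, 8, 30, 30)
#   conv2 (k3, s2): (30 - 3) // 2 + 1 = 14 -> (4, 64, 14, 14)
#   conv3 (k3, s1): 14 - 3 + 1 = 12        -> (4, 4, 12, 12)
# That is 4 * 4 * 12 * 12 = 2304 elements, so view(-1, 36) yields 64 rows,
# matching the (64, 10) output buffer in the compiled module above.
if __name__ == "__main__":
    net = Net2()
    x = torch.rand(4, 1, 64, 64)
    print(net(x).shape)  # torch.Size([64, 10])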
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_mul_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 28800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 900 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 196 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_mul_2(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 144 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (8, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (10, 36), (36, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 30, 30), (7200, 900, 30, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 8, 30, 30), (7200, 900, 30, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_0[grid(28800)](buf1, primals_2,
buf2, 28800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 64, 14, 14), (12544, 196, 14, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1),
torch.float32)
triton_poi_fused_convolution_mul_1[grid(50176)](buf4, primals_5,
buf5, 50176, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 12, 12), (576, 144, 12, 1))
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch.
float32)
triton_poi_fused_convolution_mul_2[grid(2304)](buf7, primals_7,
buf8, 2304, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf9 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf8, (64, 36),
(36, 1), 0), reinterpret_tensor(primals_8, (36, 10), (1, 36), 0
), alpha=1, beta=1, out=buf9)
del primals_9
return (buf9, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf4, buf5, buf7, reinterpret_tensor(buf8, (64, 36), (36, 1), 0),
primals_8)
def square(x):
return x * x
class Net2New(torch.nn.Module):
def __init__(self, act=square, output=10):
super().__init__()
self.act = act
self.conv1 = torch.nn.Conv2d(1, 8, kernel_size=5, stride=2, padding=0)
self.conv2 = torch.nn.Conv2d(8, 64, kernel_size=3, stride=2, padding=0)
self.conv3 = torch.nn.Conv2d(64, 4, kernel_size=3, stride=1, padding=0)
self.fc = torch.nn.Linear(4 * 3 * 3, output)
def mid_layer(self):
return ['o1', 'o1a', 'o2', 'o2a', 'o3', 'o3a', 'o4']
def forward_analyze(self, x):
o1 = self.conv1(x)
o1a = self.act(o1)
o2 = self.conv2(o1a)
o2a = self.act(o2)
o3 = self.conv3(o2a)
o3a = self.act(o3)
        o3a = o3a.view(-1, 36)
o4 = self.fc(o3a)
return o1, o1a, o2, o2a, o3, o3a, o4
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc.weight
primals_9 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
yxtj/henn
|
Net2
| false | 11,060 |
[
"MIT"
] | 0 |
5093f3e637ba0bb3e48c4f890b3b469c3617f2c5
|
https://github.com/yxtj/henn/tree/5093f3e637ba0bb3e48c4f890b3b469c3617f2c5
|
MutualInfoLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/w7/cw7te4gyxgldjwr5kar2cl6efhzshj3hghndyyllduthnsrvatef.py
# Topologically Sorted Source Nodes: [add, log, mul, sum_1, mean], Original ATen: [aten.add, aten.log, aten.mul, aten.sum, aten.mean]
# Source node to ATen node mapping:
# add => add
# log => log
# mean => mean
# mul => mul
# sum_1 => sum_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-16), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
triton_per_fused_add_log_mean_mul_sum_0 = async_compile.triton('triton_per_fused_add_log_mean_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_log_mean_mul_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp10 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp15 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp1 = 1e-16
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp4 = tmp0 * tmp3
tmp6 = tmp5 + tmp1
tmp7 = tl_math.log(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tmp4 + tmp8
tmp11 = tmp10 + tmp1
tmp12 = tl_math.log(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 + tmp1
tmp17 = tl_math.log(tmp16)
tmp18 = tmp15 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.sum(tmp20, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp22, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/zp/czptmhxpgiksx3z6y6o6dlwwhqu5ularzql4lqqlwawpbcqvty2c.py
# Topologically Sorted Source Nodes: [marginal_prob], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# marginal_prob => mean_1
# Graph fragment:
# %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [2, 3]), kwargs = {})
triton_per_fused_mean_1 = async_compile.triton('triton_per_fused_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ex/cex7lqfhcyaj5lsagcmvpb6upfvwbdcji4g66lwm6dotfe5tgjvy.py
# Topologically Sorted Source Nodes: [add, log, mul, sum_1, mean, pixel_wise_ent, marginal_prob, add_1, log_1, mul_1, sum_2, mean_2, marginal_ent, mul_2, sub], Original ATen: [aten.add, aten.log, aten.mul, aten.sum, aten.mean, aten.neg, aten.sub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# log => log
# log_1 => log_1
# marginal_ent => neg_1
# marginal_prob => mean_1
# mean => mean
# mean_2 => mean_2
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pixel_wise_ent => neg
# sub => sub
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-16), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
# %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [2, 3]), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-16), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, %log_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_2,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg, %mul_2), kwargs = {})
triton_per_fused_add_log_mean_mul_neg_sub_sum_2 = async_compile.triton('triton_per_fused_add_log_mean_mul_neg_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_neg_sub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_log_mean_mul_neg_sub_sum_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp28 = tl.load(in_out_ptr0 + (0))
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, 1])
tmp1 = 16.0
tmp2 = tmp0 / tmp1
tmp3 = 1e-16
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp6 = tmp2 * tmp5
tmp8 = tmp7 / tmp1
tmp9 = tmp8 + tmp3
tmp10 = tl_math.log(tmp9)
tmp11 = tmp8 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 / tmp1
tmp15 = tmp14 + tmp3
tmp16 = tl_math.log(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 / tmp1
tmp21 = tmp20 + tmp3
tmp22 = tl_math.log(tmp21)
tmp23 = tmp20 * tmp22
tmp24 = tmp18 + tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp30 = 64.0
tmp31 = tmp29 / tmp30
tmp32 = -tmp31
tmp33 = 4.0
tmp34 = tmp27 / tmp33
tmp35 = -tmp34
tmp36 = 2.0
tmp37 = tmp35 * tmp36
tmp38 = tmp32 - tmp37
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp38, None)
''', device_str='cuda')
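# Annotation (added): the fused kernel above finishes both entropy terms in
# one pass. It divides the per-(image, spixel) spatial sums by h*w = 16 to
# recover the marginal probabilities, accumulates sum(p * log(p + 1e-16))
# per image, and emits pixel_wise_ent - 2 * marginal_ent, where
# pixel_wise_ent = -(total entropy sum) / 64 (64 = b*h*w), marginal_ent is
# averaged over the batch of 4, and coef = 2 is baked in as the constant 2.0.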
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [add, log, mul, sum_1, mean], Original ATen: [aten.add, aten.log, aten.mul, aten.sum, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_log_mean_mul_sum_0.run(arg0_1, buf0, 1, 64, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [marginal_prob], Original ATen: [aten.mean]
triton_per_fused_mean_1.run(arg0_1, buf1, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, log, mul, sum_1, mean, pixel_wise_ent, marginal_prob, add_1, log_1, mul_1, sum_2, mean_2, marginal_ent, mul_2, sub], Original ATen: [aten.add, aten.log, aten.mul, aten.sum, aten.mean, aten.neg, aten.sub]
triton_per_fused_add_log_mean_mul_neg_sub_sum_2.run(buf3, buf1, 1, 4, grid=grid(1), stream=stream0)
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class MutualInfoLoss(nn.Module):
"""
    Mutual Information Loss derived from ss-with-RIM and also applied in
    this work.
    The first term encourages a sparse nSpixel-dimensional assignment vector
    for each pixel; the second term reflects the cardinality of each spixel.
Args:
logits: torch.tensor
A trainable tensor of shape (b, nSpixel, h, w) that
represents the probability of each pixel belonging to all spixels.
            It should be softmaxed before calling this loss function.
coef: float
A coefficient that controls the amplitude of second term.
"""
def __init__(self, coef=2):
super().__init__()
self.coef = coef
def forward(self, logits):
pixel_wise_ent = -(logits * torch.log(logits + 1e-16)).sum(1).mean()
marginal_prob = logits.mean((2, 3))
marginal_ent = -(marginal_prob * torch.log(marginal_prob + 1e-16)).sum(
1).mean()
return pixel_wise_ent - self.coef * marginal_ent
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
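# A minimal sanity-check sketch (added; the helper name below is
# hypothetical and not part of the original repo). With uniform softmax
# probabilities p = 1/nSpixel, both entropy terms equal log(nSpixel), so
# the loss collapses to (1 - coef) * log(nSpixel).
def _demo_mutual_info_loss():
    b, n_spixel, h, w = 4, 4, 4, 4
    probs = torch.full((b, n_spixel, h, w), 1.0 / n_spixel)
    loss = MutualInfoLoss(coef=2)(probs)
    expected = (1 - 2) * torch.log(torch.tensor(float(n_spixel)))
    assert torch.allclose(loss, expected, atol=1e-4)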
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
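# Annotation (added): the kernel below accumulates the raw pixel-wise
# entropy sum, i.e. sum over all b*h*w = 64 positions of
# sum_c p * log(p + 1e-16), with the channel loop over the 4 spixels
# unrolled; the negation and division by the pixel count happen later in
# the final fused kernel.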
@triton.jit
def triton_per_fused_add_log_mean_mul_sum_0(in_ptr0, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp1 = 1e-16
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp4 = tmp0 * tmp3
tmp6 = tmp5 + tmp1
tmp7 = tl_math.log(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tmp4 + tmp8
tmp11 = tmp10 + tmp1
tmp12 = tl_math.log(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 + tmp1
tmp17 = tl_math.log(tmp16)
tmp18 = tmp15 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.sum(tmp20, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)
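# Annotation (added): despite the "mean" in its name, the kernel below only
# computes the per-(image, spixel) spatial *sum* into a (4, 4) buffer; the
# division by h*w = 16 that turns it into the marginal probability is
# deferred to the fused kernel that follows.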
@triton.jit
def triton_per_fused_mean_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_per_fused_add_log_mean_mul_neg_sub_sum_2(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp28 = tl.load(in_out_ptr0 + 0)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, 1])
tmp1 = 16.0
tmp2 = tmp0 / tmp1
tmp3 = 1e-16
tmp4 = tmp2 + tmp3
tmp5 = tl_math.log(tmp4)
tmp6 = tmp2 * tmp5
tmp8 = tmp7 / tmp1
tmp9 = tmp8 + tmp3
tmp10 = tl_math.log(tmp9)
tmp11 = tmp8 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 / tmp1
tmp15 = tmp14 + tmp3
tmp16 = tl_math.log(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 / tmp1
tmp21 = tmp20 + tmp3
tmp22 = tl_math.log(tmp21)
tmp23 = tmp20 * tmp22
tmp24 = tmp18 + tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp30 = 64.0
tmp31 = tmp29 / tmp30
tmp32 = -tmp31
tmp33 = 4.0
tmp34 = tmp27 / tmp33
tmp35 = -tmp34
tmp36 = 2.0
tmp37 = tmp35 * tmp36
tmp38 = tmp32 - tmp37
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_log_mean_mul_sum_0[grid(1)](arg0_1, buf0, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_mean_1[grid(16)](arg0_1, buf1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
buf3 = buf0
del buf0
triton_per_fused_add_log_mean_mul_neg_sub_sum_2[grid(1)](buf3, buf1,
1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
return buf3,
class MutualInfoLossNew(nn.Module):
"""
    Mutual Information Loss derived from ss-with-RIM and also applied in
    this work.
    The first term encourages a sparse nSpixel-dimensional assignment vector
    for each pixel; the second term reflects the cardinality of each spixel.
Args:
logits: torch.tensor
A trainable tensor of shape (b, nSpixel, h, w) that
represents the probability of each pixel belonging to all spixels.
            It should be softmaxed before calling this loss function.
coef: float
A coefficient that controls the amplitude of second term.
"""
def __init__(self, coef=2):
super().__init__()
self.coef = coef
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
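# Hypothetical usage sketch (added; not part of the original module):
#
#   probs = torch.softmax(torch.randn(4, 4, 4, 4, device='cuda'), dim=1)
#   loss = MutualInfoLossNew()(probs)
#
# Note that call() asserts the exact (4, 4, 4, 4) shape with (64, 16, 4, 1)
# strides, so unlike the eager module this compiled version only accepts
# contiguous inputs of that one layout.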
|
yueyu-stu/EdgeAwareSpixel
|
MutualInfoLoss
| false | 11,061 |
[
"MIT"
] | 0 |
f7f9fcb15bfa8e31bd4ad9473f9058c44a8391d7
|
https://github.com/yueyu-stu/EdgeAwareSpixel/tree/f7f9fcb15bfa8e31bd4ad9473f9058c44a8391d7
|
FocalLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/tj/ctjenrmpmnfu2d3pk5t7qu4ih2v4kysuba4mugrps3thgibhpgig.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul_2, sub_2, mul_3, add_1, pred_sigmoid, sub, mul, sub_1, mul_1, pt, pow_1, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, minimum, mul_5, neg, sub_3, sub_4, sub_5
# focal_weight => mul_4
# loss => mul_6
# loss_1 => mean
# loss_cls => mul_7
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# pred_sigmoid => sigmoid
# pt => add
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, %sub_4), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.25), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 0.75), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg1_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %pow_1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %mul_4), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_6,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = 0.25
tmp14 = tmp0 * tmp13
tmp15 = 0.75
tmp16 = tmp2 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tl.sigmoid(tmp3)
tmp19 = tmp1 - tmp18
tmp20 = tmp19 * tmp0
tmp21 = tmp18 * tmp2
tmp22 = tmp20 + tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp17 * tmp23
tmp25 = tmp12 * tmp24
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tmp29 = 256.0
tmp30 = tmp28 / tmp29
tmp31 = tmp30 * tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp31, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul_2, sub_2, mul_3, add_1, pred_sigmoid, sub, mul, sub_1, mul_1, pt, pow_1, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
    Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
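# Illustrative note (added): with reduction='mean' and an avg_factor, the
# result is loss.sum() / avg_factor rather than an element-count mean, e.g.
# a detection-style loss averaged over the number of positive samples.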
def convert_to_one_hot(targets: 'torch.Tensor', classes) -> torch.Tensor:
"""This function converts target class indices to one-hot vectors, given
the number of classes.
Args:
targets (Tensor): The ground truth label of the prediction
with shape (N, 1)
classes (int): the number of classes.
Returns:
        Tensor: One-hot encoded targets of shape (N, classes).
"""
assert torch.max(targets).item(
) < classes, 'Class Index must be less than number of classes'
one_hot_targets = torch.zeros((targets.shape[0], classes), dtype=torch.
long, device=targets.device)
one_hot_targets.scatter_(1, targets.long(), 1)
return one_hot_targets
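# Minimal illustration (added; the helper name is hypothetical): class
# indices [[0], [2]] with classes=3 become rows [1, 0, 0] and [0, 0, 1].
def _demo_one_hot():
    targets = torch.tensor([[0], [2]])
    one_hot = convert_to_one_hot(targets, classes=3)
    assert one_hot.tolist() == [[1, 0, 0], [0, 0, 1]]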
def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25,
reduction='mean', avg_factor=None):
"""Sigmoid focal loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \\*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \\*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, ). Defaults to None.
gamma (float): The gamma for calculating the modulating factor.
Defaults to 2.0.
        alpha (float): The balancing factor in the balanced form of Focal
            Loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' ,
loss is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert pred.shape == target.shape, 'pred and target should be in the same shape.'
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma
)
loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none'
) * focal_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
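# Worked example (added annotation): for a positive target t = 1 and a raw
# logit of 0, pred_sigmoid = 0.5, so pt = 0.5 and
# focal_weight = alpha * pt**gamma = 0.25 * 0.25 = 0.0625; the BCE term
# -log(0.5) ~= 0.693 is therefore scaled down to ~0.043 before reduction.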
class FocalLoss(nn.Module):
"""Focal loss.
Args:
gamma (float): Focusing parameter in focal loss.
Defaults to 2.0.
        alpha (float): The parameter in the balanced form of focal
            loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss into
a scalar. Options are "none" and "mean". Defaults to 'mean'.
loss_weight (float): Weight of loss. Defaults to 1.0.
"""
def __init__(self, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0
):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None,
reduction_override=None):
"""Sigmoid focal loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \\*).
target (torch.Tensor): The ground truth label of the prediction
with shape (N, \\*), N or (N,1).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, \\*). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss into a scalar. Options are "none", "mean" and "sum".
Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
if target.dim() == 1 or target.dim() == 2 and target.shape[1] == 1:
target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1])
loss_cls = self.loss_weight * sigmoid_focal_loss(pred, target,
weight, gamma=self.gamma, alpha=self.alpha, reduction=reduction,
avg_factor=avg_factor)
return loss_cls
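# Minimal usage sketch (added; assumes soft multi-label targets in [0, 1]
# that already match the prediction shape, so no one-hot conversion runs):
#
#   criterion = FocalLoss(gamma=2.0, alpha=0.25)
#   pred = torch.randn(4, 4, 4, 4)   # raw logits
#   target = torch.rand(4, 4, 4, 4)  # same shape as pred
#   loss = criterion(pred, target)   # scalar, reduction='mean'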
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
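# Annotation (added): the single persistent-reduction kernel below fuses the
# whole sigmoid focal-loss forward: the numerically stable BCE-with-logits
# (via min(0, x) - log1p(exp(-|x|))), the alpha-balanced focal weight
# (alpha*t + (1 - alpha)*(1 - t)) * pt**2, and the final mean over all
# 4*4*4*4 = 256 elements, scaled by loss_weight = 1.0.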
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = 0.25
tmp14 = tmp0 * tmp13
tmp15 = 0.75
tmp16 = tmp2 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tl.sigmoid(tmp3)
tmp19 = tmp1 - tmp18
tmp20 = tmp19 * tmp0
tmp21 = tmp18 * tmp2
tmp22 = tmp20 + tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp17 * tmp23
tmp25 = tmp12 * tmp24
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tmp29 = 256.0
tmp30 = tmp28 / tmp29
tmp31 = tmp30 * tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0[
grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
    Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def convert_to_one_hot(targets: 'torch.Tensor', classes) -> torch.Tensor:
"""This function converts target class indices to one-hot vectors, given
the number of classes.
Args:
targets (Tensor): The ground truth label of the prediction
with shape (N, 1)
classes (int): the number of classes.
Returns:
        Tensor: One-hot encoded targets of shape (N, classes).
"""
assert torch.max(targets).item(
) < classes, 'Class Index must be less than number of classes'
one_hot_targets = torch.zeros((targets.shape[0], classes), dtype=torch.
long, device=targets.device)
one_hot_targets.scatter_(1, targets.long(), 1)
return one_hot_targets
def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25,
reduction='mean', avg_factor=None):
"""Sigmoid focal loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \\*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \\*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, ). Defaults to None.
gamma (float): The gamma for calculating the modulating factor.
Defaults to 2.0.
        alpha (float): The balancing factor in the balanced form of Focal
            Loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' ,
loss is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert pred.shape == target.shape, 'pred and target should be in the same shape.'
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma
)
loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none'
) * focal_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
class FocalLossNew(nn.Module):
"""Focal loss.
Args:
gamma (float): Focusing parameter in focal loss.
Defaults to 2.0.
        alpha (float): The parameter in the balanced form of focal
            loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss into
a scalar. Options are "none" and "mean". Defaults to 'mean'.
loss_weight (float): Weight of loss. Defaults to 1.0.
"""
def __init__(self, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0
):
super(FocalLossNew, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
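# Note (added annotation): the compiled path hard-codes gamma=2.0,
# alpha=0.25, reduction='mean' and loss_weight=1.0 inside the fused kernel
# (the 0.25/0.75 constants, the squared pt, the division by 256.0 and the
# final multiply by 1.0), so the constructor arguments of FocalLossNew do
# not reach call().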
|
youqingxiaozhua/ABAW3
|
FocalLoss
| false | 11,062 |
[
"Apache-2.0"
] | 0 |
51ab58ab311ecd6603a8485a45af0dcc39880e69
|
https://github.com/youqingxiaozhua/ABAW3/tree/51ab58ab311ecd6603a8485a45af0dcc39880e69
|
BertAttention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
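# Annotation (added): the kernel above fuses the linear-projection bias add
# with the attention scaling (a multiply by 1.0 here, presumably because
# each attention head has size 1) while permuting the result into the
# (batch, head, seq, head_dim) layout expected by the batched matmuls.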
# kernel path: runs/run_shard_8/inductor_cache/iz/ciztqj6kop3hxov46yrmzprkzfir3eljcic4mkqznz2j5cfeaudr.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float("-inf")
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = (tmp29 != 0)
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = (tmp33 != 0)
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = (tmp38 != 0)
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = (tmp43 != 0)
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + (x2), tmp14, xmask)
tl.store(out_ptr1 + (x2), tmp25, xmask)
tl.store(out_ptr2 + (x2), tmp45, xmask)
''', device_str='cuda')
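# Annotation (added): first half of a numerically stable masked softmax over
# the attention scores. Per row it emits the running max (out_ptr0), the sum
# of exp(x - max) (out_ptr1), and a boolean flag (out_ptr2) recording whether
# any entry differs from -inf, so fully masked rows can later be zeroed
# instead of producing NaNs.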
# kernel path: runs/run_shard_8/inductor_cache/x5/cx5uvbfethxuwwkwxf3xaualzhlcwqsz4jxqpbhintggaypzjwqf.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + (x4), xmask)
tmp3 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
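# Annotation (added): second half of the softmax. It recomputes
# exp(x - max) / sum in place over the scores buffer and writes 0.0 wherever
# the row flag from the previous kernel shows that every entry was -inf.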
# kernel path: runs/run_shard_8/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/hk/chkirlrxzb52fxbrq2rynamgt7aligt77yn6j6ihfk46whjvd374.py
# Topologically Sorted Source Nodes: [add_1, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add_1 => add_1
# pow_1 => pow_1
# s => mean_1
# sub => sub_1
# u => mean
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
triton_poi_fused_add_mean_pow_sub_5 = async_compile.triton('triton_poi_fused_add_mean_pow_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
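# Annotation (added): computes the per-token LayerNorm statistics over the
# last dimension (size 4) of the residual sum hidden_states + input:
# out_ptr0 receives the mean u and out_ptr1 the biased variance
# s = mean((x - u)**2).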
# kernel path: runs/run_shard_8/inductor_cache/yq/cyqp2dtjka2kdwget3aoidhkk72sxga5xdfgoyaskxfpk55rnzdw.py
# Topologically Sorted Source Nodes: [add_1, u, sub, add_2, sqrt, x_3, mul, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# hidden_states_2 => add_3
# mul => mul
# sqrt => sqrt
# sub => sub_1
# u => mean
# x_3 => div_2
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %div_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_12), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_6 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
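# Annotation (added): finishes the residual LayerNorm in one pass:
# (x - u) / sqrt(s + 1e-05), scaled by the learned weight and shifted by the
# learned bias, fused with the residual add of the attention output.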
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, primals_8, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf9, buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_7, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add_1, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
triton_poi_fused_add_mean_pow_sub_5.run(buf13, primals_3, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_1, u, sub, add_2, sqrt, x_3, mul, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_sqrt_sub_6.run(primals_11, buf13, primals_3, buf14, buf15, primals_12, buf16, 64, grid=grid(64), stream=stream0)
del buf14
del buf15
del primals_12
return (buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
import torch.utils.data
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root)."""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, history_states=None):
if history_states is None:
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
else:
x_states = torch.cat((history_states, hidden_states), dim=1)
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(x_states)
mixed_value_layer = self.value(x_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-05)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask, history_states=None):
self_output = self.self(input_tensor, attention_mask,
history_states=history_states)
attention_output = self.output(self_output, input_tensor)
return attention_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
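# Illustrative usage sketch (not part of the original repo; the helper name
# is hypothetical). It runs the eager-mode BertAttention above on CPU with
# the toy shapes from get_inputs(); SimpleNamespace is an assumption
# standing in for _paritybench_helpers._mock_config.
def _demo_bert_attention():
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=4, num_attention_heads=4,
                          attention_probs_dropout_prob=0.5,
                          hidden_dropout_prob=0.5)
    attn = BertAttention(cfg).eval()  # eval() disables both dropouts
    hidden_states, attention_mask = torch.rand(4, 4, 4), torch.zeros(4, 4, 4)
    with torch.no_grad():
        out = attn(hidden_states, attention_mask)
    assert out.shape == (4, 4, 4)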
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
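# Comment added for clarity: triton_poi_fused_1 below is the first half of a
# numerically stable softmax over each length-4 attention row. For every row
# it writes the maximum of (score + mask) to out_ptr0, the sum of
# exp(score + mask - max) to out_ptr1, and to out_ptr2 a flag that is True
# when at least one entry of the row is not -inf.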
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float('-inf')
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = tmp29 != 0
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = tmp33 != 0
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = tmp38 != 0
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = tmp43 != 0
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
tl.store(out_ptr2 + x2, tmp45, xmask)
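# Comment added for clarity: triton_poi_fused_2 below finishes the softmax in
# place: it computes exp(score + mask - row_max) / row_sum and zeroes rows
# whose flag indicates every entry was -inf (fully masked rows).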
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_out_ptr0 + x4, xmask)
tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
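# Comment added for clarity: triton_poi_fused_clone_4 below materializes the
# permute(0, 2, 1, 3).contiguous() copy of the attention output (the
# `context_layer` reshape in the eager code).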
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
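# Comment added for clarity: triton_per_fused_add_mean_pow_sub_5 below
# computes the BertLayerNorm statistics of (dense_output + residual): the
# per-row mean u (out_ptr0) and the biased variance s (out_ptr1) over the
# last dimension of size 4.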
@triton.jit
def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
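# Comment added for clarity: triton_poi_fused_add_div_mean_mul_sqrt_sub_6
# below applies the normalization itself,
# weight * (x - u) / sqrt(s + 1e-05) + bias, matching BertLayerNorm.forward.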
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_mean_pow_sub_5[grid(16)](buf13, primals_3,
buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_6[grid(64)](primals_11,
buf13, primals_3, buf14, buf15, primals_12, buf16, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf14
del buf15
del primals_12
return buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root)."""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, history_states=None):
if history_states is None:
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
else:
x_states = torch.cat((history_states, hidden_states), dim=1)
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(x_states)
mixed_value_layer = self.value(x_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-05)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttentionNew(nn.Module):
def __init__(self, config):
super(BertAttentionNew, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_0, input_1):
primals_1 = self.self.query.weight
primals_2 = self.self.query.bias
primals_4 = self.self.key.weight
primals_5 = self.self.key.bias
primals_6 = self.self.value.weight
primals_7 = self.self.value.bias
primals_9 = self.output.dense.weight
primals_10 = self.output.dense.bias
primals_11 = self.output.LayerNorm.weight
primals_12 = self.output.LayerNorm.bias
primals_3 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
zdxdsw/WebQA_VLP
|
BertAttention
| false | 11,063 |
[
"Apache-2.0"
] | 0 |
443bcd7e9b36db47e2ab4502abaaa3724800f394
|
https://github.com/zdxdsw/WebQA_VLP/tree/443bcd7e9b36db47e2ab4502abaaa3724800f394
|
LearnedPositionalEmbeddings
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/26/c26shuqxjhfilaoens27tm25ij3y3e5ctdjjfr7fuwafpcfebmdi.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %select), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (5000, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((5000, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
class LearnedPositionalEmbeddings(Module):
"""
<a id="LearnedPositionalEmbeddings">
## Add parameterized positional encodings
</a>
This adds learned positional embeddings to patch embeddings.
"""
def __init__(self, d_model: 'int', max_len: 'int'=5000):
"""
* `d_model` is the transformer embeddings size
* `max_len` is the maximum number of patches
"""
super().__init__()
self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1,
d_model), requires_grad=True)
def forward(self, x: 'torch.Tensor'):
"""
* `x` is the patch embeddings of shape `[patches, batch_size, d_model]`
"""
        # Note (comment added): this selects the single embedding at index
        # x.shape[0] (here 4), broadcast over x, rather than slicing the
        # first x.shape[0] positions; the compiled kernel reproduces exactly
        # this behavior.
        pe = self.positional_encodings[x.shape[0]]
        return x + pe
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
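# Illustrative usage sketch (not part of the original repo; the helper name
# is hypothetical). It shows that forward() adds the single learned
# embedding at index x.shape[0], broadcast over the whole input.
def _demo_learned_positional_embeddings():
    emb = LearnedPositionalEmbeddings(d_model=4)
    x = torch.rand(4, 4, 4, 4)
    out = emb(x)
    assert torch.equal(out, x + emb.positional_encodings[x.shape[0]])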
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
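# Comment added for clarity: the kernel below adds one fixed positional
# embedding row to every element of the input. The hard-coded offset
# 16 + x0 is row 4 of the (5000, 1, 4) table, i.e. the compiled form of
# positional_encodings[x.shape[0]] for the traced shape.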
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (5000, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class LearnedPositionalEmbeddingsNew(Module):
"""
<a id="LearnedPositionalEmbeddings">
## Add parameterized positional encodings
</a>
This adds learned positional embeddings to patch embeddings.
"""
def __init__(self, d_model: 'int', max_len: 'int'=5000):
"""
* `d_model` is the transformer embeddings size
* `max_len` is the maximum number of patches
"""
super().__init__()
self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1,
d_model), requires_grad=True)
def forward(self, input_0):
primals_1 = self.positional_encodings
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
ppvalluri09/annotated_deep_learning_paper_implementations
|
LearnedPositionalEmbeddings
| false | 11,064 |
[
"MIT"
] | 0 |
387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
https://github.com/ppvalluri09/annotated_deep_learning_paper_implementations/tree/387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
ClippedValueFunctionLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ux/cuxyq6jg627wu3buhxoipzlue5jekgs24d5ikz7kqoqhqlliw5dg.py
# Topologically Sorted Source Nodes: [sub_1, pow_1, sub, neg, clamp, clipped_value, sub_2, pow_2, vf_loss, mean, mul], Original ATen: [aten.sub, aten.pow, aten.neg, aten.clamp, aten.add, aten.maximum, aten.mean, aten.mul]
# Source node to ATen node mapping:
# clamp => clamp_max, clamp_min
# clipped_value => add
# mean => mean
# mul => mul
# neg => neg
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# vf_loss => maximum
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg3_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg2_1,), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.Tensor](args = (%sub, %neg), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.Tensor](args = (%clamp_min, %arg2_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %clamp_max), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %arg3_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%pow_1, %pow_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%maximum,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {})
triton_per_fused_add_clamp_maximum_mean_mul_neg_pow_sub_0 = async_compile.triton('triton_per_fused_add_clamp_maximum_mean_mul_neg_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_maximum_mean_mul_neg_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_maximum_mean_mul_neg_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp4 = tl.load(in_ptr2 + (r0), None)
tmp6 = tl.load(in_ptr3 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp0 - tmp4
tmp7 = -tmp6
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp9 = triton_helpers.minimum(tmp8, tmp6)
tmp10 = tmp4 + tmp9
tmp11 = tmp10 - tmp1
tmp12 = tmp11 * tmp11
tmp13 = triton_helpers.maximum(tmp3, tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tmp19 = 0.5
tmp20 = tmp18 * tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub_1, pow_1, sub, neg, clamp, clipped_value, sub_2, pow_2, vf_loss, mean, mul], Original ATen: [aten.sub, aten.pow, aten.neg, aten.clamp, aten.add, aten.maximum, aten.mean, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_maximum_mean_mul_neg_pow_sub_0.run(buf1, arg0_1, arg3_1, arg1_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
import torch.utils.data
import torch.nn.functional
import torch.autograd
class ClippedValueFunctionLoss(Module):
"""
## Clipped Value Function Loss
Similarly we clip the value function update also.
    \\begin{align}
    V^{\\pi_\\theta}_{CLIP}(s_t)
     &= clip\\Bigl(V^{\\pi_\\theta}(s_t) - \\hat{V_t}, -\\epsilon, +\\epsilon\\Bigr)
    \\
    \\mathcal{L}^{VF}(\\theta)
     &= \\frac{1}{2} \\mathbb{E} \\biggl[
      max\\Bigl(\\bigl(V^{\\pi_\\theta}(s_t) - R_t\\bigr)^2,
      \\bigl(V^{\\pi_\\theta}_{CLIP}(s_t) - R_t\\bigr)^2\\Bigr)
     \\biggr]
    \\end{align}
    Clipping makes sure the value function $V_\\theta$ doesn't deviate
    significantly from $V_{\\theta_{OLD}}$.
"""
def forward(self, value: 'torch.Tensor', sampled_value: 'torch.Tensor',
sampled_return: 'torch.Tensor', clip: 'float'):
clipped_value = sampled_value + (value - sampled_value).clamp(min=-
clip, max=clip)
vf_loss = torch.max((value - sampled_return) ** 2, (clipped_value -
sampled_return) ** 2)
return 0.5 * vf_loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
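# Illustrative usage sketch (not part of the original repo; the helper name
# is hypothetical). It checks the module against a direct transcription of
# the clipped-value-loss formula above.
def _demo_clipped_value_loss():
    loss_fn = ClippedValueFunctionLoss()
    value, sampled_value, sampled_return = (torch.rand(8) for _ in range(3))
    clip = 0.1
    loss = loss_fn(value, sampled_value, sampled_return, clip)
    clipped = sampled_value + (value - sampled_value).clamp(-clip, clip)
    expected = 0.5 * torch.max((value - sampled_return) ** 2,
                               (clipped - sampled_return) ** 2).mean()
    assert torch.allclose(loss, expected)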
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
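# Comment added for clarity: the kernel below fuses the whole clipped value
# loss into one reduction: it forms (value - return)^2 and
# (clipped_value - return)^2 elementwise, takes their maximum, averages over
# all 256 elements, and multiplies by 0.5.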
@triton.jit
def triton_per_fused_add_clamp_maximum_mean_mul_neg_pow_sub_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp4 = tl.load(in_ptr2 + r0, None)
tmp6 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp0 - tmp4
tmp7 = -tmp6
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp9 = triton_helpers.minimum(tmp8, tmp6)
tmp10 = tmp4 + tmp9
tmp11 = tmp10 - tmp1
tmp12 = tmp11 * tmp11
tmp13 = triton_helpers.maximum(tmp3, tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tmp19 = 0.5
tmp20 = tmp18 * tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_maximum_mean_mul_neg_pow_sub_0[grid(1)](buf1
, arg0_1, arg3_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1
)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf1,
class ClippedValueFunctionLossNew(Module):
"""
## Clipped Value Function Loss
Similarly we clip the value function update also.
    \\begin{align}
    V^{\\pi_\\theta}_{CLIP}(s_t)
     &= clip\\Bigl(V^{\\pi_\\theta}(s_t) - \\hat{V_t}, -\\epsilon, +\\epsilon\\Bigr)
    \\
    \\mathcal{L}^{VF}(\\theta)
     &= \\frac{1}{2} \\mathbb{E} \\biggl[
      max\\Bigl(\\bigl(V^{\\pi_\\theta}(s_t) - R_t\\bigr)^2,
      \\bigl(V^{\\pi_\\theta}_{CLIP}(s_t) - R_t\\bigr)^2\\Bigr)
     \\biggr]
    \\end{align}
    Clipping makes sure the value function $V_\\theta$ doesn't deviate
    significantly from $V_{\\theta_{OLD}}$.
"""
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
ppvalluri09/annotated_deep_learning_paper_implementations
|
ClippedValueFunctionLoss
| false | 11,065 |
[
"MIT"
] | 0 |
387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
https://github.com/ppvalluri09/annotated_deep_learning_paper_implementations/tree/387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
DPFP
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/nk/cnkn4azjs7p23wvexd663q3licov6bhbhf2h5uynp4fgwugfkg6g.py
# Topologically Sorted Source Nodes: [cat, x, roll, k, sum_1, add, truediv], Original ATen: [aten.cat, aten.relu, aten.roll, aten.mul, aten.sum, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add_1
# cat => cat
# k => mul
# roll => index
# sum_1 => sum_1
# truediv => div
# x => relu
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %neg], -1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%cat,), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%relu, [None, None, None, %fmod]), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %index), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {})
triton_per_fused_add_cat_div_mul_relu_roll_sum_0 = async_compile.triton('triton_per_fused_add_cat_div_mul_relu_roll_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 8],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_cat_div_mul_relu_roll_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_cat_div_mul_relu_roll_sum_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 8
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = r1
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x0) + r1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1, 1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + ((4*x0) + ((-4) + r1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = -tmp9
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp6, tmp10, tmp11)
tmp13 = tl.where(tmp4, tmp5, tmp12)
tmp14 = tl.full([1, 1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = (7 + r1) % 8
tmp17 = tmp16 >= tmp1
tmp18 = tmp16 < tmp3
tmp19 = tl.load(in_ptr0 + ((4*x0) + ((7 + r1) % 8)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp16 >= tmp3
tmp21 = tmp16 < tmp7
tmp22 = tl.load(in_ptr0 + ((4*x0) + ((-4) + ((7 + r1) % 8))), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = -tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp20, tmp23, tmp24)
tmp26 = tl.where(tmp18, tmp19, tmp25)
tmp27 = triton_helpers.maximum(tmp14, tmp26)
tmp28 = tmp15 * tmp27
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.where(xmask, tmp29, 0)
tmp32 = tl.sum(tmp31, 1)[:, None]
tmp33 = 1e-06
tmp34 = tmp32 + tmp33
tmp35 = tmp28 / tmp34
tl.store(out_ptr2 + (r1 + (8*x0)), tmp35, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat, x, roll, k, sum_1, add, truediv], Original ATen: [aten.cat, aten.relu, aten.roll, aten.mul, aten.sum, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_cat_div_mul_relu_roll_sum_0.run(arg0_1, buf2, 64, 8, grid=grid(64), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
class DPFP(Module):
"""
## Deterministic Parameter Free Project (DPFP)
This is the new projection function $\\color{lightgreen}{\\phi}$ introduced in the paper.
    DPFP projects $k$ of dimensionality $d_{key}$ to dimensionality
    $d_{dot} = 2 d_{key} \\nu$,
    where $\\nu \\in \\{1, 2, ..., 2 d_{key} - 1 \\}$ is a hyper-parameter.
    $$\\color{lightgreen}{\\phi_{2 d_{key} (i - 1) + j}(k)}
    = \\text{ReLU}\\Big(\\big[k, -k\\big]\\Big)_{j}
    \\text{ReLU}\\Big(\\big[k, -k\\big]\\Big)_{i + j}$$
    where $\\big[k, -k\\big]$ is the concatenation of $k$ and $-k$ to give a vector of
    size $2 d_{key}$, $i \\in \\{1, 2, ..., \\nu \\}$, and $j \\in \\{1, 2, ..., 2 d_{key}\\}$.
    $x_i$ is the $i$-th element of vector $x$ and is rolled around if
    $i$ is larger than the number of elements in $x$.
    Basically, it creates a new vector by multiplying elements of $[k, -k]$ shifted by $i$.
    This produces projections that are sparse (only a few elements of $\\phi$ are non-zero) and
    orthogonal ($\\color{lightgreen}{\\phi(k^{(i)})} \\cdot \\color{lightgreen}{\\phi(k^{(j)})} \\approx 0$
    for most $i, j$ unless $k^{(i)}$ and $k^{(j)}$ are very similar).
    ### Normalization
    Paper introduces a simple normalization for $\\color{lightgreen}{\\phi}$,
    $$\\color{lightgreen}{\\phi '(k)} =
    \\frac{\\color{lightgreen}{\\phi(k)}}{\\sum^{d_{dot}}_{j=1} \\color{lightgreen}{\\phi(k)_j}}$$
*Check the paper for derivation.*
"""
def __init__(self, nu: 'int'=1, eps: 'float'=1e-06):
"""
        * `nu` is the hyper-parameter $\\nu$.
* `eps` is the small value used to make sure there is no division-by-zero when normalizing.
"""
super().__init__()
self.nu = nu
self.relu = nn.ReLU()
self.eps = eps
def forward(self, k: 'torch.Tensor'):
k = self.dpfp(k)
return k / (torch.sum(k, dim=-1, keepdim=True) + self.eps)
def dpfp(self, k: 'torch.Tensor'):
"""
$$\\color{lightgreen}{\\phi(k)}$$
"""
x = self.relu(torch.cat([k, -k], dim=-1))
x_rolled = [x.roll(shifts=i, dims=-1) for i in range(1, self.nu + 1)]
x_rolled = torch.cat(x_rolled, dim=-1)
x_repeat = torch.cat([x] * self.nu, dim=-1)
return x_repeat * x_rolled
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
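# Illustrative usage sketch (not part of the original repo; the helper name
# is hypothetical). With nu=1 and d_key=4 the projection doubles the last
# dimension to 8, stays non-negative (products of ReLU outputs), and is
# normalized so each feature vector sums to approximately one.
def _demo_dpfp():
    proj = DPFP(nu=1)
    out = proj(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 8)
    assert (out >= 0).all()
    assert torch.allclose(out.sum(dim=-1), torch.ones(4, 4, 4), atol=1e-3)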
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
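# Comment added for clarity: the kernel below is the nu=1 DPFP projection
# fused into one pass: it rebuilds relu(cat([k, -k])) on the fly, multiplies
# it by the same vector rolled by one position (index (7 + r1) % 8), sums
# the products over the feature dimension of size 8, and stores the
# normalized result product / (sum + 1e-06).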
@triton.jit
def triton_per_fused_add_cat_div_mul_relu_roll_sum_0(in_ptr0, out_ptr2,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = r1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x0 + r1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1, 1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * x0 + (-4 + r1)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = -tmp9
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp6, tmp10, tmp11)
tmp13 = tl.where(tmp4, tmp5, tmp12)
tmp14 = tl.full([1, 1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = (7 + r1) % 8
tmp18 = tmp16 < tmp3
tmp19 = tl.load(in_ptr0 + (4 * x0 + (7 + r1) % 8), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tmp16 >= tmp3
tmp22 = tl.load(in_ptr0 + (4 * x0 + (-4 + (7 + r1) % 8)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = -tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp20, tmp23, tmp24)
tmp26 = tl.where(tmp18, tmp19, tmp25)
tmp27 = triton_helpers.maximum(tmp14, tmp26)
tmp28 = tmp15 * tmp27
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.where(xmask, tmp29, 0)
tmp32 = tl.sum(tmp31, 1)[:, None]
tmp33 = 1e-06
tmp34 = tmp32 + tmp33
tmp35 = tmp28 / tmp34
tl.store(out_ptr2 + (r1 + 8 * x0), tmp35, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_cat_div_mul_relu_roll_sum_0[grid(64)](arg0_1,
buf2, 64, 8, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class DPFPNew(Module):
"""
## Deterministic Parameter Free Project (DPFP)
This is the new projection function $\\color{lightgreen}{\\phi}$ introduced in the paper.
    DPFP projects $k$ of dimensionality $d_{key}$ to dimensionality
    $d_{dot} = 2 d_{key} \\nu$,
    where $\\nu \\in \\{1, 2, ..., 2 d_{key} - 1 \\}$ is a hyper-parameter.
    $$\\color{lightgreen}{\\phi_{2 d_{key} (i - 1) + j}(k)}
    = \\text{ReLU}\\Big(\\big[k, -k\\big]\\Big)_{j}
    \\text{ReLU}\\Big(\\big[k, -k\\big]\\Big)_{i + j}$$
    where $\\big[k, -k\\big]$ is the concatenation of $k$ and $-k$ to give a vector of
    size $2 d_{key}$, $i \\in \\{1, 2, ..., \\nu \\}$, and $j \\in \\{1, 2, ..., 2 d_{key}\\}$.
    $x_i$ is the $i$-th element of vector $x$ and is rolled around if
    $i$ is larger than the number of elements in $x$.
    Basically, it creates a new vector by multiplying elements of $[k, -k]$ shifted by $i$.
    This produces projections that are sparse (only a few elements of $\\phi$ are non-zero) and
    orthogonal ($\\color{lightgreen}{\\phi(k^{(i)})} \\cdot \\color{lightgreen}{\\phi(k^{(j)})} \\approx 0$
    for most $i, j$ unless $k^{(i)}$ and $k^{(j)}$ are very similar).
    ### Normalization
    Paper introduces a simple normalization for $\\color{lightgreen}{\\phi}$,
    $$\\color{lightgreen}{\\phi '(k)} =
    \\frac{\\color{lightgreen}{\\phi(k)}}{\\sum^{d_{dot}}_{j=1} \\color{lightgreen}{\\phi(k)_j}}$$
*Check the paper for derivation.*
"""
def __init__(self, nu: 'int'=1, eps: 'float'=1e-06):
"""
        * `nu` is the hyper-parameter $\\nu$.
* `eps` is the small value used to make sure there is no division-by-zero when normalizing.
"""
super().__init__()
self.nu = nu
self.relu = nn.ReLU()
self.eps = eps
def dpfp(self, k: 'torch.Tensor'):
"""
$$\\color{lightgreen}{\\phi(k)}$$
"""
x = self.relu(torch.cat([k, -k], dim=-1))
x_rolled = [x.roll(shifts=i, dims=-1) for i in range(1, self.nu + 1)]
x_rolled = torch.cat(x_rolled, dim=-1)
x_repeat = torch.cat([x] * self.nu, dim=-1)
return x_repeat * x_rolled
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ppvalluri09/annotated_deep_learning_paper_implementations
|
DPFP
| false | 11,066 |
[
"MIT"
] | 0 |
387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
https://github.com/ppvalluri09/annotated_deep_learning_paper_implementations/tree/387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
DiscriminatorLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/6k/c6keogbsbczhv7za5qvdnqtoyslxmpkn26o5ri5y2uquynatq3pp.py
# Topologically Sorted Source Nodes: [sub, relu, mean], Original ATen: [aten.rsub, aten.relu, aten.mean]
# Source node to ATen node mapping:
# mean => mean
# relu => relu
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%relu,), kwargs = {})
triton_per_fused_mean_relu_rsub_0 = async_compile.triton('triton_per_fused_mean_relu_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_relu_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_relu_rsub_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/lj/cljvcit4p6vjtwmejklnkzpuexfn22rbffq5dyzgt4sefx3jy3zk.py
# Topologically Sorted Source Nodes: [add, relu_1, mean_1], Original ATen: [aten.add, aten.relu, aten.mean]
# Source node to ATen node mapping:
# add => add
# mean_1 => mean_1
# relu_1 => relu_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%relu_1,), kwargs = {})
triton_per_fused_add_mean_relu_1 = async_compile.triton('triton_per_fused_add_mean_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_relu_1(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, relu, mean], Original ATen: [aten.rsub, aten.relu, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_relu_rsub_0.run(buf2, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [add, relu_1, mean_1], Original ATen: [aten.add, aten.relu, aten.mean]
triton_per_fused_add_mean_relu_1.run(buf3, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg1_1
return (buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
import torch.nn.functional as F
import torch.utils.data
import torch.nn.functional
import torch.autograd
class DiscriminatorLoss(Module):
"""
## Discriminator Loss
We want to find $w$ to maximize
    $$\\mathbb{E}_{x \\sim \\mathbb{P}_r} [f_w(x)]- \\mathbb{E}_{z \\sim p(z)} [f_w(g_\\theta(z))]$$,
    so we minimize,
    $$-\\frac{1}{m} \\sum_{i=1}^m f_w \\big(x^{(i)} \\big) +
    \\frac{1}{m} \\sum_{i=1}^m f_w \\big( g_\\theta(z^{(i)}) \\big)$$
"""
def forward(self, f_real: 'torch.Tensor', f_fake: 'torch.Tensor'):
"""
* `f_real` is $f_w(x)$
        * `f_fake` is $f_w(g_\\theta(z))$
        This returns a tuple with losses for $f_w(x)$ and $f_w(g_\\theta(z))$,
which are later added.
They are kept separate for logging.
"""
return F.relu(1 - f_real).mean(), F.relu(1 + f_fake).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
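# ---------------------------------------------------------------------------
# A small usage sketch (assumed, not part of the source). The two hinge terms
# are returned separately for logging and summed by the caller to form the
# full discriminator loss.
loss_fn = DiscriminatorLoss()
f_real = torch.rand(4, 4, 4, 4)  # hypothetical discriminator scores on real samples
f_fake = torch.rand(4, 4, 4, 4)  # hypothetical scores on generated samples
loss_real, loss_fake = loss_fn(f_real, f_fake)
loss = loss_real + loss_fake     # combined objective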
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_relu_rsub_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
@triton.jit
def triton_per_fused_add_mean_relu_1(in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_relu_rsub_0[grid(1)](buf2, arg0_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
triton_per_fused_add_mean_relu_1[grid(1)](buf3, arg1_1, 1, 256,
num_warps=2, num_stages=1)
del arg1_1
return buf2, buf3
class DiscriminatorLossNew(Module):
"""
## Discriminator Loss
We want to find $w$ to maximize
    $$\\mathbb{E}_{x \\sim \\mathbb{P}_r} [f_w(x)]- \\mathbb{E}_{z \\sim p(z)} [f_w(g_\\theta(z))]$$,
    so we minimize,
    $$-\\frac{1}{m} \\sum_{i=1}^m f_w \\big(x^{(i)} \\big) +
    \\frac{1}{m} \\sum_{i=1}^m f_w \\big( g_\\theta(z^{(i)}) \\big)$$
"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
|
ppvalluri09/annotated_deep_learning_paper_implementations
|
DiscriminatorLoss
| false | 11,067 |
[
"MIT"
] | 0 |
387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
https://github.com/ppvalluri09/annotated_deep_learning_paper_implementations/tree/387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
PatchEmbeddings
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/sr/csrhhqsexdcor6gq6tz4dawxblhadgekinzxxkt33uwojltligp6.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
return (reinterpret_tensor(buf1, (1, 4, 4), (1, 4, 1), 0), primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
class PatchEmbeddings(Module):
"""
<a id="PatchEmbeddings">
## Get patch embeddings
</a>
    The paper splits the image into patches of equal size and does a linear transformation
on the flattened pixels for each patch.
We implement the same thing through a convolution layer, because it's simpler to implement.
"""
def __init__(self, d_model: 'int', patch_size: 'int', in_channels: 'int'):
"""
* `d_model` is the transformer embeddings size
* `patch_size` is the size of the patch
* `in_channels` is the number of channels in the input image (3 for rgb)
"""
super().__init__()
self.conv = nn.Conv2d(in_channels, d_model, patch_size, stride=
patch_size)
def forward(self, x: 'torch.Tensor'):
"""
* `x` is the input image of shape `[batch_size, channels, height, width]`
"""
x = self.conv(x)
bs, c, h, w = x.shape
x = x.permute(2, 3, 0, 1)
x = x.view(h * w, bs, c)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'patch_size': 4, 'in_channels': 4}]
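# ---------------------------------------------------------------------------
# A shape-level sketch (assumed, not part of the source): with the 4x4 inputs
# above and patch_size=4, each image is a single patch, so the output shape is
# [h * w, batch_size, d_model] = [1, 4, 4].
pe = PatchEmbeddings(d_model=4, patch_size=4, in_channels=4)
x = torch.rand(4, 4, 4, 4)       # [batch_size, channels, height, width]
out = pe(x)
assert out.shape == (1, 4, 4)    # one patch per image, batch of 4, embedding size 4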
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (1, 4, 4), (1, 4, 1), 0
), primals_1, primals_3
class PatchEmbeddingsNew(Module):
"""
<a id="PatchEmbeddings">
## Get patch embeddings
</a>
    The paper splits the image into patches of equal size and does a linear transformation
on the flattened pixels for each patch.
We implement the same thing through a convolution layer, because it's simpler to implement.
"""
def __init__(self, d_model: 'int', patch_size: 'int', in_channels: 'int'):
"""
* `d_model` is the transformer embeddings size
* `patch_size` is the size of the patch
* `in_channels` is the number of channels in the input image (3 for rgb)
"""
super().__init__()
self.conv = nn.Conv2d(in_channels, d_model, patch_size, stride=
patch_size)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ppvalluri09/annotated_deep_learning_paper_implementations
|
PatchEmbeddings
| false | 11,068 |
[
"MIT"
] | 0 |
387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
https://github.com/ppvalluri09/annotated_deep_learning_paper_implementations/tree/387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
LSTMCell
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/lg/clgtnw2bgvc3mnanbov77yxt2a6ztfqy7qpuhudozzijvzjgbgly.py
# Topologically Sorted Source Nodes: [sigmoid, mul, sigmoid_1, tanh, mul_1, c_next, sigmoid_2, tanh_1, h_next], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add, aten.sigmoid_backward]
# Source node to ATen node mapping:
# c_next => add_1
# h_next => mul_2
# mul => mul
# mul_1 => mul_1
# sigmoid => sigmoid
# sigmoid_1 => sigmoid_1
# sigmoid_2 => sigmoid_2
# tanh => tanh
# tanh_1 => tanh_1
# Graph fragment:
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_6), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %tanh), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_3,), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %tanh_1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_4), kwargs = {})
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp18 = tl.load(in_ptr3 + (x2), xmask)
tmp25 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp26 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp19 = tmp17 * tmp18
tmp20 = tmp5 * tmp11
tmp21 = tmp19 + tmp20
tmp22 = 1.0
tmp23 = tmp22 - tmp17
tmp24 = tmp17 * tmp23
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.sigmoid(tmp29)
tmp31 = libdevice.tanh(tmp21)
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + (x2), tmp5, xmask)
tl.store(out_ptr1 + (x2), tmp11, xmask)
tl.store(out_ptr2 + (x2), tmp21, xmask)
tl.store(out_ptr3 + (x2), tmp24, xmask)
tl.store(out_ptr4 + (x2), tmp30, xmask)
tl.store(out_ptr5 + (x2), tmp32, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul, sigmoid_1, tanh, mul_1, c_next, sigmoid_2, tanh_1, h_next], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add, aten.sigmoid_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0.run(buf0, primals_2, buf1, primals_6, buf2, buf3, buf4, buf7, buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_2
return (buf6, buf4, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), buf2, buf3, buf4, buf5, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
class LSTMCell(Module):
"""
## Long Short-Term Memory Cell
LSTM Cell computes $c$, and $h$. $c$ is like the long-term memory,
and $h$ is like the short term memory.
We use the input $x$ and $h$ to update the long term memory.
In the update, some features of $c$ are cleared with a forget gate $f$,
and some features $i$ are added through a gate $g$.
    The new short term memory is the $\\tanh$ of the long-term memory
multiplied by the output gate $o$.
Note that the cell doesn't look at long term memory $c$ when doing the update. It only modifies it.
Also $c$ never goes through a linear transformation.
This is what solves vanishing and exploding gradients.
Here's the update rule.
    \\begin{align}
    c_t &= \\sigma(f_t) \\odot c_{t-1} + \\sigma(i_t) \\odot \\tanh(g_t) \\
    h_t &= \\sigma(o_t) \\odot \\tanh(c_t)
\\end{align}
$\\odot$ stands for element-wise multiplication.
Intermediate values and gates are computed as linear transformations of the hidden
state and input.
    \\begin{align}
i_t &= lin_x^i(x_t) + lin_h^i(h_{t-1}) \\
f_t &= lin_x^f(x_t) + lin_h^f(h_{t-1}) \\
g_t &= lin_x^g(x_t) + lin_h^g(h_{t-1}) \\
o_t &= lin_x^o(x_t) + lin_h^o(h_{t-1})
\\end{align}
"""
def __init__(self, input_size: 'int', hidden_size: 'int', layer_norm:
'bool'=False):
super().__init__()
self.hidden_lin = nn.Linear(hidden_size, 4 * hidden_size)
self.input_lin = nn.Linear(input_size, 4 * hidden_size, bias=False)
if layer_norm:
self.layer_norm = nn.ModuleList([nn.LayerNorm(hidden_size) for
_ in range(4)])
self.layer_norm_c = nn.LayerNorm(hidden_size)
else:
self.layer_norm = nn.ModuleList([nn.Identity() for _ in range(4)])
self.layer_norm_c = nn.Identity()
def forward(self, x: 'torch.Tensor', h: 'torch.Tensor', c: 'torch.Tensor'):
ifgo = self.hidden_lin(h) + self.input_lin(x)
ifgo = ifgo.chunk(4, dim=-1)
ifgo = [self.layer_norm[i](ifgo[i]) for i in range(4)]
i, f, g, o = ifgo
c_next = torch.sigmoid(f) * c + torch.sigmoid(i) * torch.tanh(g)
h_next = torch.sigmoid(o) * torch.tanh(self.layer_norm_c(c_next))
return h_next, c_next
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
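# ---------------------------------------------------------------------------
# A minimal sketch (assumed, not part of the source) stepping the cell over a
# short sequence; `hidden_size=4` matches `get_init_inputs` above.
cell = LSTMCell(input_size=4, hidden_size=4)
h = torch.zeros(2, 4)            # hypothetical batch of 2
c = torch.zeros(2, 4)
for _ in range(3):               # three time steps of random input
    x_t = torch.randn(2, 4)
    h, c = cell(x_t, h, c)       # h: short-term state, c: long-term state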
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp18 = tl.load(in_ptr3 + x2, xmask)
tmp25 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp26 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp19 = tmp17 * tmp18
tmp20 = tmp5 * tmp11
tmp21 = tmp19 + tmp20
tmp22 = 1.0
tmp23 = tmp22 - tmp17
tmp24 = tmp17 * tmp23
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.sigmoid(tmp29)
tmp31 = libdevice.tanh(tmp21)
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp21, xmask)
tl.store(out_ptr3 + x2, tmp24, xmask)
tl.store(out_ptr4 + x2, tmp30, xmask)
tl.store(out_ptr5 + x2, tmp32, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0[grid(256)](
buf0, primals_2, buf1, primals_6, buf2, buf3, buf4, buf7, buf5,
buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
del primals_2
return buf6, buf4, primals_6, reinterpret_tensor(primals_3, (64, 4), (4,
1), 0), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0
), buf2, buf3, buf4, buf5, buf7
class LSTMCellNew(Module):
"""
## Long Short-Term Memory Cell
LSTM Cell computes $c$, and $h$. $c$ is like the long-term memory,
and $h$ is like the short term memory.
We use the input $x$ and $h$ to update the long term memory.
In the update, some features of $c$ are cleared with a forget gate $f$,
and some features $i$ are added through a gate $g$.
    The new short term memory is the $\\tanh$ of the long-term memory
multiplied by the output gate $o$.
Note that the cell doesn't look at long term memory $c$ when doing the update. It only modifies it.
Also $c$ never goes through a linear transformation.
This is what solves vanishing and exploding gradients.
Here's the update rule.
    \\begin{align}
    c_t &= \\sigma(f_t) \\odot c_{t-1} + \\sigma(i_t) \\odot \\tanh(g_t) \\
    h_t &= \\sigma(o_t) \\odot \\tanh(c_t)
\\end{align}
$\\odot$ stands for element-wise multiplication.
Intermediate values and gates are computed as linear transformations of the hidden
state and input.
    \\begin{align}
i_t &= lin_x^i(x_t) + lin_h^i(h_{t-1}) \\
f_t &= lin_x^f(x_t) + lin_h^f(h_{t-1}) \\
g_t &= lin_x^g(x_t) + lin_h^g(h_{t-1}) \\
o_t &= lin_x^o(x_t) + lin_h^o(h_{t-1})
\\end{align}
"""
def __init__(self, input_size: 'int', hidden_size: 'int', layer_norm:
'bool'=False):
super().__init__()
self.hidden_lin = nn.Linear(hidden_size, 4 * hidden_size)
self.input_lin = nn.Linear(input_size, 4 * hidden_size, bias=False)
if layer_norm:
self.layer_norm = nn.ModuleList([nn.LayerNorm(hidden_size) for
_ in range(4)])
self.layer_norm_c = nn.LayerNorm(hidden_size)
else:
self.layer_norm = nn.ModuleList([nn.Identity() for _ in range(4)])
self.layer_norm_c = nn.Identity()
def forward(self, input_0, input_1, input_2):
primals_1 = self.hidden_lin.weight
primals_2 = self.hidden_lin.bias
primals_4 = self.input_lin.weight
primals_3 = input_0
primals_5 = input_1
primals_6 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
ppvalluri09/annotated_deep_learning_paper_implementations
|
LSTMCell
| false | 11,069 |
[
"MIT"
] | 0 |
387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
https://github.com/ppvalluri09/annotated_deep_learning_paper_implementations/tree/387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
ChannelNorm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/i7/ci7t7iiz7rvr7feeg7u3oqbndzrc2eexgichqwatlcys5unofv7u.py
# Topologically Sorted Source Nodes: [mean, pow_1, mean_x2, pow_2, var, sub_1, add, sqrt, x_norm, mul, x_norm_1], Original ATen: [aten.mean, aten.pow, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mean_x2 => mean_1
# mul => mul
# pow_1 => pow_1
# pow_2 => pow_2
# sqrt => sqrt
# sub_1 => sub_1
# var => sub
# x_norm => div
# x_norm_1 => add_1
# Graph fragment:
# %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1], True), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mean, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean_1, %pow_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_2), kwargs = {})
triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp18 = tl.load(in_ptr1 + (0))
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp23 = tl.load(in_ptr2 + (0))
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = tmp0 * tmp0
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp4 / tmp10
tmp12 = tmp9 / tmp10
tmp13 = tmp11 * tmp11
tmp14 = tmp12 - tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp20 = tmp0 - tmp11
tmp21 = tmp20 / tmp17
tmp22 = tmp19 * tmp21
tmp25 = tmp22 + tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp11, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp17, xmask)
tl.store(out_ptr0 + (r1 + (64*x0)), tmp25, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse
buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0); del buf2 # reuse
buf4 = empty_strided_cuda((4, 1, 64), (64, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, pow_1, mean_x2, pow_2, var, sub_1, add, sqrt, x_norm, mul, x_norm_1], Original ATen: [aten.mean, aten.pow, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0.run(buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, grid=grid(4), stream=stream0)
del primals_2
del primals_3
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
class ChannelNorm(Module):
"""
## Channel Normalization
    This is similar to [Group Normalization](../group_norm/index.html) but the affine transform is done group-wise.
"""
def __init__(self, channels, groups, eps: 'float'=1e-05, affine: 'bool'
=True):
"""
* `groups` is the number of groups the features are divided into
* `channels` is the number of features in the input
* `eps` is $\\epsilon$, used in $\\sqrt{Var[x^{(k)}] + \\epsilon}$ for numerical stability
* `affine` is whether to scale and shift the normalized value
"""
super().__init__()
self.channels = channels
self.groups = groups
self.eps = eps
self.affine = affine
if self.affine:
self.scale = nn.Parameter(torch.ones(groups))
self.shift = nn.Parameter(torch.zeros(groups))
def forward(self, x: 'torch.Tensor'):
"""
`x` is a tensor of shape `[batch_size, channels, *]`.
`*` denotes any number of (possibly 0) dimensions.
For example, in an image (2D) convolution this will be
`[batch_size, channels, height, width]`
"""
x_shape = x.shape
batch_size = x_shape[0]
assert self.channels == x.shape[1]
x = x.view(batch_size, self.groups, -1)
mean = x.mean(dim=[-1], keepdim=True)
mean_x2 = (x ** 2).mean(dim=[-1], keepdim=True)
var = mean_x2 - mean ** 2
x_norm = (x - mean) / torch.sqrt(var + self.eps)
if self.affine:
x_norm = self.scale.view(1, -1, 1) * x_norm + self.shift.view(1,
-1, 1)
return x_norm.view(x_shape)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'groups': 1}]
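# ---------------------------------------------------------------------------
# A small numerical sketch (assumed, not part of the source). With groups=1 and
# the default scale=1, shift=0, every sample is normalized over its single
# group, so per-sample mean is ~0 and (biased) variance is ~1.
norm = ChannelNorm(channels=4, groups=1)
x = torch.rand(4, 4, 4, 4)
y = norm(x)
flat = y.view(4, -1)             # one group of 64 elements per sample
print(flat.mean(dim=-1))                 # each entry close to 0
print(flat.var(dim=-1, unbiased=False))  # each entry close to 1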
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp18 = tl.load(in_ptr1 + 0)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp23 = tl.load(in_ptr2 + 0)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = tmp0 * tmp0
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp4 / tmp10
tmp12 = tmp9 / tmp10
tmp13 = tmp11 * tmp11
tmp14 = tmp12 - tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp20 = tmp0 - tmp11
tmp21 = tmp20 / tmp17
tmp22 = tmp19 * tmp21
tmp25 = tmp22 + tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp17, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp25, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 1, 64), (64, 64, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0[grid(4)](buf1,
buf3, primals_1, primals_2, primals_3, buf4, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, buf1, buf3
class ChannelNormNew(Module):
"""
## Channel Normalization
    This is similar to [Group Normalization](../group_norm/index.html) but the affine transform is done group-wise.
"""
def __init__(self, channels, groups, eps: 'float'=1e-05, affine: 'bool'
=True):
"""
* `groups` is the number of groups the features are divided into
* `channels` is the number of features in the input
* `eps` is $\\epsilon$, used in $\\sqrt{Var[x^{(k)}] + \\epsilon}$ for numerical stability
* `affine` is whether to scale and shift the normalized value
"""
super().__init__()
self.channels = channels
self.groups = groups
self.eps = eps
self.affine = affine
if self.affine:
self.scale = nn.Parameter(torch.ones(groups))
self.shift = nn.Parameter(torch.zeros(groups))
def forward(self, input_0):
primals_2 = self.scale
primals_3 = self.shift
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ppvalluri09/annotated_deep_learning_paper_implementations
|
ChannelNorm
| false | 11,070 |
[
"MIT"
] | 0 |
387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
https://github.com/ppvalluri09/annotated_deep_learning_paper_implementations/tree/387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
SmoothContourLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ju/cju54kifo4hw3wuq7dlrgf6bstltupxm2zadqshq3qw3krsedm2s.py
# Topologically Sorted Source Nodes: [di_dx, di_norm, min_1, min_2, max_1, max_2, di_dy, di_norm_2, min_3, min_4, max_3, max_4], Original ATen: [aten.sub, aten.pow, aten.min, aten.max]
# Source node to ATen node mapping:
# di_dx => sub_2
# di_dy => sub_3
# di_norm => pow_2
# di_norm_2 => pow_4
# max_1 => max_1
# max_2 => max_2
# max_3 => max_3
# max_4 => max_4
# min_1 => min_1
# min_2 => min_2
# min_3 => min_3
# min_4 => min_4
# Graph fragment:
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_7, %slice_8), kwargs = {})
# %pow_2 : [num_users=3] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%pow_2, -1, True), kwargs = {})
# %min_2 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%getitem, -2, True), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%pow_2, -1, True), kwargs = {})
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%getitem_4, -2, True), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_9, %slice_11), kwargs = {})
# %pow_4 : [num_users=3] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {})
# %min_3 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%pow_4, -1, True), kwargs = {})
# %min_4 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%getitem_8, -2, True), kwargs = {})
# %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%pow_4, -1, True), kwargs = {})
# %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%getitem_12, -2, True), kwargs = {})
triton_poi_fused_max_min_pow_sub_0 = async_compile.triton('triton_poi_fused_max_min_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_min_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_min_pow_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp1 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = triton_helpers.minimum(tmp3, tmp6)
tmp9 = tmp4 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = triton_helpers.minimum(tmp7, tmp10)
tmp14 = tmp12 - tmp13
tmp15 = tmp14 * tmp14
tmp17 = tmp13 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = triton_helpers.minimum(tmp15, tmp18)
tmp21 = tmp16 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = triton_helpers.minimum(tmp19, tmp22)
tmp24 = triton_helpers.minimum(tmp11, tmp23)
tmp27 = tmp25 - tmp26
tmp28 = tmp27 * tmp27
tmp30 = tmp26 - tmp29
tmp31 = tmp30 * tmp30
tmp32 = triton_helpers.minimum(tmp28, tmp31)
tmp34 = tmp29 - tmp33
tmp35 = tmp34 * tmp34
tmp36 = triton_helpers.minimum(tmp32, tmp35)
tmp37 = triton_helpers.minimum(tmp24, tmp36)
tmp40 = tmp38 - tmp39
tmp41 = tmp40 * tmp40
tmp43 = tmp39 - tmp42
tmp44 = tmp43 * tmp43
tmp45 = triton_helpers.minimum(tmp41, tmp44)
tmp47 = tmp42 - tmp46
tmp48 = tmp47 * tmp47
tmp49 = triton_helpers.minimum(tmp45, tmp48)
tmp50 = triton_helpers.minimum(tmp37, tmp49)
tmp51 = triton_helpers.maximum(tmp3, tmp6)
tmp52 = triton_helpers.maximum(tmp51, tmp10)
tmp53 = triton_helpers.maximum(tmp15, tmp18)
tmp54 = triton_helpers.maximum(tmp53, tmp22)
tmp55 = triton_helpers.maximum(tmp52, tmp54)
tmp56 = triton_helpers.maximum(tmp28, tmp31)
tmp57 = triton_helpers.maximum(tmp56, tmp35)
tmp58 = triton_helpers.maximum(tmp55, tmp57)
tmp59 = triton_helpers.maximum(tmp41, tmp44)
tmp60 = triton_helpers.maximum(tmp59, tmp48)
tmp61 = triton_helpers.maximum(tmp58, tmp60)
tmp62 = tmp0 - tmp12
tmp63 = tmp62 * tmp62
tmp64 = tmp1 - tmp13
tmp65 = tmp64 * tmp64
tmp66 = triton_helpers.minimum(tmp63, tmp65)
tmp67 = tmp4 - tmp16
tmp68 = tmp67 * tmp67
tmp69 = triton_helpers.minimum(tmp66, tmp68)
tmp70 = tmp8 - tmp20
tmp71 = tmp70 * tmp70
tmp72 = triton_helpers.minimum(tmp69, tmp71)
tmp73 = tmp12 - tmp25
tmp74 = tmp73 * tmp73
tmp75 = tmp13 - tmp26
tmp76 = tmp75 * tmp75
tmp77 = triton_helpers.minimum(tmp74, tmp76)
tmp78 = tmp16 - tmp29
tmp79 = tmp78 * tmp78
tmp80 = triton_helpers.minimum(tmp77, tmp79)
tmp81 = tmp20 - tmp33
tmp82 = tmp81 * tmp81
tmp83 = triton_helpers.minimum(tmp80, tmp82)
tmp84 = triton_helpers.minimum(tmp72, tmp83)
tmp85 = tmp25 - tmp38
tmp86 = tmp85 * tmp85
tmp87 = tmp26 - tmp39
tmp88 = tmp87 * tmp87
tmp89 = triton_helpers.minimum(tmp86, tmp88)
tmp90 = tmp29 - tmp42
tmp91 = tmp90 * tmp90
tmp92 = triton_helpers.minimum(tmp89, tmp91)
tmp93 = tmp33 - tmp46
tmp94 = tmp93 * tmp93
tmp95 = triton_helpers.minimum(tmp92, tmp94)
tmp96 = triton_helpers.minimum(tmp84, tmp95)
tmp97 = triton_helpers.maximum(tmp63, tmp65)
tmp98 = triton_helpers.maximum(tmp97, tmp68)
tmp99 = triton_helpers.maximum(tmp98, tmp71)
tmp100 = triton_helpers.maximum(tmp74, tmp76)
tmp101 = triton_helpers.maximum(tmp100, tmp79)
tmp102 = triton_helpers.maximum(tmp101, tmp82)
tmp103 = triton_helpers.maximum(tmp99, tmp102)
tmp104 = triton_helpers.maximum(tmp86, tmp88)
tmp105 = triton_helpers.maximum(tmp104, tmp91)
tmp106 = triton_helpers.maximum(tmp105, tmp94)
tmp107 = triton_helpers.maximum(tmp103, tmp106)
tl.store(out_ptr0 + (x0), tmp50, xmask)
tl.store(out_ptr1 + (x0), tmp61, xmask)
tl.store(out_ptr2 + (x0), tmp96, xmask)
tl.store(out_ptr3 + (x0), tmp107, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/k7/ck7le7qd3solahpqjno6o6l7e3uldsqoepivgiqycntjv73kqy66.py
# Topologically Sorted Source Nodes: [di_dx, di_norm, di_dy, di_norm_2, dp_dx, abs_1, sum_1, pow_1, sum_2, neg, truediv, exp, mul, mean, smooth, dp_dy, abs_3, sum_5, pow_3, sum_6, neg_2, truediv_2, exp_1, mul_5, mean_2, smooth_1, mul_10, sub_4, sub_5, add_1, truediv_1, sum_3, di_norm_1, isValidEdges, di_valid, abs_2, sum_4, dp_valid, add_2, log, mul_4, mean_1, neg_1, contour, sub_6, sub_7, add_4, truediv_3, sum_7, di_norm_3, isValidEdges_1, di_valid_1, abs_4, sum_8, dp_valid_1, add_5, log_1, mul_9, mean_3, neg_3, contour_1, mul_11, add_6], Original ATen: [aten.sub, aten.pow, aten.abs, aten.sum, aten.neg, aten.div, aten.exp, aten.mul, aten.mean, aten.add, aten.gt, aten.log]
# Source node to ATen node mapping:
# abs_1 => abs_1
# abs_2 => abs_2
# abs_3 => abs_3
# abs_4 => abs_4
# add_1 => add_1
# add_2 => add_2
# add_4 => add_5
# add_5 => add_6
# add_6 => add_8
# contour => add_3
# contour_1 => add_7
# di_dx => sub_2
# di_dy => sub_3
# di_norm => pow_2
# di_norm_1 => mul_1
# di_norm_2 => pow_4
# di_norm_3 => mul_6
# di_valid => mul_3
# di_valid_1 => mul_8
# dp_dx => sub
# dp_dy => sub_1
# dp_valid => mul_2
# dp_valid_1 => mul_7
# exp => exp
# exp_1 => exp_1
# isValidEdges => gt
# isValidEdges_1 => gt_1
# log => log
# log_1 => log_1
# mean => mean
# mean_1 => mean_1
# mean_2 => mean_2
# mean_3 => mean_3
# mul => mul
# mul_10 => mul_10
# mul_11 => mul_11
# mul_4 => mul_4
# mul_5 => mul_5
# mul_9 => mul_9
# neg => neg
# neg_1 => neg_1
# neg_2 => neg_2
# neg_3 => neg_3
# pow_1 => pow_1
# pow_3 => pow_3
# smooth => add
# smooth_1 => add_4
# sub_4 => sub_4
# sub_5 => sub_5
# sub_6 => sub_6
# sub_7 => sub_7
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# sum_4 => sum_4
# sum_5 => sum_5
# sum_6 => sum_6
# sum_7 => sum_7
# sum_8 => sum_8
# truediv => div
# truediv_1 => div_1
# truediv_2 => div_2
# truediv_3 => div_3
# Graph fragment:
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_7, %slice_8), kwargs = {})
# %pow_2 : [num_users=3] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_9, %slice_11), kwargs = {})
# %pow_4 : [num_users=3] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_1, %slice_2), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, [1]), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, 8), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, %exp), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 0.0), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_5), kwargs = {})
# %abs_3 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_3, [1]), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1]), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_6,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg_2, 8), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_2,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_5, %exp_1), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_5,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mean_2), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, 1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %getitem_2), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_6, %getitem_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_5, 1e-16), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_4, %add_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div_1, [1]), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 2), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul_1, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %gt), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_2, [1]), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, %gt), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 1e-16), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_2,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %log), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_4,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_1,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_1, 0.0), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_4, %getitem_10), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem_14, %getitem_10), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_7, 1e-16), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_6, %add_5), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div_3, [1]), kwargs = {})
# %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_7, 2), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul_6, 1.0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %gt_1), kwargs = {})
# %abs_4 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_4, [1]), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_8, %gt_1), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, 1e-16), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_6,), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_8, %log_1), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_9,), kwargs = {})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_3,), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %neg_3), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 1), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, %mul_11), kwargs = {})
triton_per_fused_abs_add_div_exp_gt_log_mean_mul_neg_pow_sub_sum_1 = async_compile.triton('triton_per_fused_abs_add_div_exp_gt_log_mean_mul_neg_pow_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=(7,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_exp_gt_log_mean_mul_neg_pow_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 48, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_exp_gt_log_mean_mul_neg_pow_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 48
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 3
r1 = (rindex // 3) % 4
r2 = (rindex // 12)
r3 = rindex
r4 = rindex % 12
tmp0 = tl.load(in_ptr0 + (r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (1 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp4 = tl.load(in_ptr0 + (16 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (17 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (32 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp10 = tl.load(in_ptr0 + (33 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp14 = tl.load(in_ptr0 + (48 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp15 = tl.load(in_ptr0 + (49 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp19 = tl.load(in_ptr1 + (r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (1 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp23 = tl.load(in_ptr1 + (16 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (17 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp28 = tl.load(in_ptr1 + (32 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp29 = tl.load(in_ptr1 + (33 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp33 = tl.load(in_ptr1 + (48 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp34 = tl.load(in_ptr1 + (49 + r0 + (4*r1) + (64*r2)), rmask, other=0.0)
tmp43 = tl.load(in_ptr2 + (4*r2), rmask, eviction_policy='evict_last', other=0.0)
tmp45 = tl.load(in_ptr3 + (4*r2), rmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.load(in_ptr2 + (1 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp52 = tl.load(in_ptr3 + (1 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.load(in_ptr2 + (2 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp59 = tl.load(in_ptr3 + (2 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp64 = tl.load(in_ptr2 + (3 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp66 = tl.load(in_ptr3 + (3 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp89 = tl.load(in_ptr0 + (r4 + (64*r2)), rmask, other=0.0)
tmp90 = tl.load(in_ptr0 + (4 + r4 + (64*r2)), rmask, other=0.0)
tmp93 = tl.load(in_ptr0 + (16 + r4 + (64*r2)), rmask, other=0.0)
tmp94 = tl.load(in_ptr0 + (20 + r4 + (64*r2)), rmask, other=0.0)
tmp98 = tl.load(in_ptr0 + (32 + r4 + (64*r2)), rmask, other=0.0)
tmp99 = tl.load(in_ptr0 + (36 + r4 + (64*r2)), rmask, other=0.0)
tmp103 = tl.load(in_ptr0 + (48 + r4 + (64*r2)), rmask, other=0.0)
tmp104 = tl.load(in_ptr0 + (52 + r4 + (64*r2)), rmask, other=0.0)
tmp108 = tl.load(in_ptr1 + (r4 + (64*r2)), rmask, other=0.0)
tmp109 = tl.load(in_ptr1 + (4 + r4 + (64*r2)), rmask, other=0.0)
tmp112 = tl.load(in_ptr1 + (16 + r4 + (64*r2)), rmask, other=0.0)
tmp113 = tl.load(in_ptr1 + (20 + r4 + (64*r2)), rmask, other=0.0)
tmp117 = tl.load(in_ptr1 + (32 + r4 + (64*r2)), rmask, other=0.0)
tmp118 = tl.load(in_ptr1 + (36 + r4 + (64*r2)), rmask, other=0.0)
tmp122 = tl.load(in_ptr1 + (48 + r4 + (64*r2)), rmask, other=0.0)
tmp123 = tl.load(in_ptr1 + (52 + r4 + (64*r2)), rmask, other=0.0)
tmp131 = tl.load(in_ptr4 + (4*r2), rmask, eviction_policy='evict_last', other=0.0)
tmp133 = tl.load(in_ptr5 + (4*r2), rmask, eviction_policy='evict_last', other=0.0)
tmp137 = tl.load(in_ptr4 + (1 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp139 = tl.load(in_ptr5 + (1 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp144 = tl.load(in_ptr4 + (2 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp146 = tl.load(in_ptr5 + (2 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp151 = tl.load(in_ptr4 + (3 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp153 = tl.load(in_ptr5 + (3 + (4*r2)), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tl_math.abs(tmp11)
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = tmp13 + tmp17
tmp21 = tmp19 - tmp20
tmp22 = tmp21 * tmp21
tmp25 = tmp23 - tmp24
tmp26 = tmp25 * tmp25
tmp27 = tmp22 + tmp26
tmp30 = tmp28 - tmp29
tmp31 = tmp30 * tmp30
tmp32 = tmp27 + tmp31
tmp35 = tmp33 - tmp34
tmp36 = tmp35 * tmp35
tmp37 = tmp32 + tmp36
tmp38 = -tmp37
tmp39 = 0.125
tmp40 = tmp38 * tmp39
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp18 * tmp41
tmp44 = tmp22 - tmp43
tmp46 = tmp45 - tmp43
tmp47 = 1e-16
tmp48 = tmp46 + tmp47
tmp49 = tmp44 / tmp48
tmp51 = tmp26 - tmp50
tmp53 = tmp52 - tmp50
tmp54 = tmp53 + tmp47
tmp55 = tmp51 / tmp54
tmp56 = tmp49 + tmp55
tmp58 = tmp31 - tmp57
tmp60 = tmp59 - tmp57
tmp61 = tmp60 + tmp47
tmp62 = tmp58 / tmp61
tmp63 = tmp56 + tmp62
tmp65 = tmp36 - tmp64
tmp67 = tmp66 - tmp64
tmp68 = tmp67 + tmp47
tmp69 = tmp65 / tmp68
tmp70 = tmp63 + tmp69
tmp71 = 2.0
tmp72 = tmp70 * tmp71
tmp73 = 1.0
tmp74 = tmp72 > tmp73
tmp75 = tmp74.to(tl.float32)
tmp76 = tmp18 * tmp75
tmp77 = tmp72 * tmp75
tmp78 = tmp76 + tmp47
tmp79 = tl_math.log(tmp78)
tmp80 = tmp77 * tmp79
tmp81 = tl.broadcast_to(tmp80, [XBLOCK, RBLOCK])
tmp83 = tl.where(rmask, tmp81, 0)
tmp84 = tl.sum(tmp83, 1)[:, None]
tmp85 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK])
tmp87 = tl.where(rmask, tmp85, 0)
tmp88 = tl.sum(tmp87, 1)[:, None]
tmp91 = tmp89 - tmp90
tmp92 = tl_math.abs(tmp91)
tmp95 = tmp93 - tmp94
tmp96 = tl_math.abs(tmp95)
tmp97 = tmp92 + tmp96
tmp100 = tmp98 - tmp99
tmp101 = tl_math.abs(tmp100)
tmp102 = tmp97 + tmp101
tmp105 = tmp103 - tmp104
tmp106 = tl_math.abs(tmp105)
tmp107 = tmp102 + tmp106
tmp110 = tmp108 - tmp109
tmp111 = tmp110 * tmp110
tmp114 = tmp112 - tmp113
tmp115 = tmp114 * tmp114
tmp116 = tmp111 + tmp115
tmp119 = tmp117 - tmp118
tmp120 = tmp119 * tmp119
tmp121 = tmp116 + tmp120
tmp124 = tmp122 - tmp123
tmp125 = tmp124 * tmp124
tmp126 = tmp121 + tmp125
tmp127 = -tmp126
tmp128 = tmp127 * tmp39
tmp129 = tl_math.exp(tmp128)
tmp130 = tmp107 * tmp129
tmp132 = tmp111 - tmp131
tmp134 = tmp133 - tmp131
tmp135 = tmp134 + tmp47
tmp136 = tmp132 / tmp135
tmp138 = tmp115 - tmp137
tmp140 = tmp139 - tmp137
tmp141 = tmp140 + tmp47
tmp142 = tmp138 / tmp141
tmp143 = tmp136 + tmp142
tmp145 = tmp120 - tmp144
tmp147 = tmp146 - tmp144
tmp148 = tmp147 + tmp47
tmp149 = tmp145 / tmp148
tmp150 = tmp143 + tmp149
tmp152 = tmp125 - tmp151
tmp154 = tmp153 - tmp151
tmp155 = tmp154 + tmp47
tmp156 = tmp152 / tmp155
tmp157 = tmp150 + tmp156
tmp158 = tmp157 * tmp71
tmp159 = tmp158 > tmp73
tmp160 = tmp159.to(tl.float32)
tmp161 = tmp107 * tmp160
tmp162 = tmp158 * tmp160
tmp163 = tmp161 + tmp47
tmp164 = tl_math.log(tmp163)
tmp165 = tmp162 * tmp164
tmp166 = tl.broadcast_to(tmp165, [XBLOCK, RBLOCK])
tmp168 = tl.where(rmask, tmp166, 0)
tmp169 = tl.sum(tmp168, 1)[:, None]
tmp170 = tl.broadcast_to(tmp130, [XBLOCK, RBLOCK])
tmp172 = tl.where(rmask, tmp170, 0)
tmp173 = tl.sum(tmp172, 1)[:, None]
tmp174 = 48.0
tmp175 = tmp88 / tmp174
tmp176 = 0.0
tmp177 = tmp175 + tmp176
tmp178 = tmp173 / tmp174
tmp179 = tmp177 + tmp178
tmp180 = tmp179 * tmp73
tmp181 = tmp84 / tmp174
tmp182 = -tmp181
tmp183 = tmp182 + tmp176
tmp184 = tmp169 / tmp174
tmp185 = -tmp184
tmp186 = tmp183 + tmp185
tmp187 = tmp186 * tmp73
tmp188 = tmp180 + tmp187
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp188, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [di_dx, di_norm, min_1, min_2, max_1, max_2, di_dy, di_norm_2, min_3, min_4, max_3, max_4], Original ATen: [aten.sub, aten.pow, aten.min, aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_max_min_pow_sub_0.run(arg1_1, buf0, buf1, buf2, buf3, 16, grid=grid(16), stream=stream0)
buf10 = empty_strided_cuda((), (), torch.float32)
buf14 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [di_dx, di_norm, di_dy, di_norm_2, dp_dx, abs_1, sum_1, pow_1, sum_2, neg, truediv, exp, mul, mean, smooth, dp_dy, abs_3, sum_5, pow_3, sum_6, neg_2, truediv_2, exp_1, mul_5, mean_2, smooth_1, mul_10, sub_4, sub_5, add_1, truediv_1, sum_3, di_norm_1, isValidEdges, di_valid, abs_2, sum_4, dp_valid, add_2, log, mul_4, mean_1, neg_1, contour, sub_6, sub_7, add_4, truediv_3, sum_7, di_norm_3, isValidEdges_1, di_valid_1, abs_4, sum_8, dp_valid_1, add_5, log_1, mul_9, mean_3, neg_3, contour_1, mul_11, add_6], Original ATen: [aten.sub, aten.pow, aten.abs, aten.sum, aten.neg, aten.div, aten.exp, aten.mul, aten.mean, aten.add, aten.gt, aten.log]
triton_per_fused_abs_add_div_exp_gt_log_mean_mul_neg_pow_sub_sum_1.run(buf14, arg0_1, arg1_1, buf0, buf1, buf2, buf3, 1, 48, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del buf0
del buf1
del buf2
del buf3
return (buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class SmoothContourLoss(nn.Module):
"""
    Loss function that combines a smoothness loss derived from ss-with-RIM
    and a contour-aware loss.
    The smoothness loss concerns the smoothness of local patterns, while the
    contour-aware loss is interested in whether two patches are divided.
    Cross entropy (or KL divergence) is applied to calculate the contour-aware
    loss. When calculating the gradients of the probability, i.e. dp, and
    those of the image, i.e. di, we want the distribution of dp to
    approximate that of di.
Args:
logits: torch.tensor
A trainable tensor of shape (b, nSpixel, h, w)
It should be softmaxed before calling this loss function.
image: torch.tensor
            A tensor derived from the color channels of the input, with
            shape (b, c, h, w)
        sigma: float
            Parameter of the transformed Gaussian kernel size
        weights: List[float]
            A list containing 2 coefficients that control the amplitudes of
            the 2 losses
        thresh: float
            Parameter for controlling the amplitude of edges
        margin: int
            Parameter for the edge width (used as the finite-difference
            stride)
"""
def __init__(self, sigma=2, weights=[1, 1], thresh=1.0, margin=1):
super().__init__()
self.sigma = 2 * sigma ** 2
self.weights = weights
self.thresh = thresh
self.margin = margin
def forward(self, logits, image):
dp, di = self.get_gradients(logits, image)
smooth = 0.0
contour = 0.0
for idx in range(len(dp)):
smooth += self.smooth_loss(dp[idx], di[idx])
contour += self.contour_loss(dp[idx], di[idx])
return self.weights[0] * smooth + self.weights[1] * contour
    def get_gradients(self, logits, image):
        # Finite differences along x and y, using self.margin as the stride.
dp_dx = logits[..., :-self.margin] - logits[..., self.margin:]
dp_dy = logits[..., :-self.margin, :] - logits[..., self.margin:, :]
di_dx = image[..., :-self.margin] - image[..., self.margin:]
di_dy = image[..., :-self.margin, :] - image[..., self.margin:, :]
return [dp_dx, dp_dy], [di_dx, di_dy]
    def smooth_loss(self, dp, di):
        # Gaussian-weighted total variation: strong image gradients (di)
        # suppress the smoothness penalty on the probability gradients (dp).
        return (dp.abs().sum(1) * (-di.pow(2).sum(1) / self.sigma).exp()).mean()
    def contour_loss(self, dp, di):
        # Min-max normalise the squared image gradients over each (h, w) map,
        # then keep only locations whose edge response exceeds the threshold.
        di_norm = di.pow(2)
        di_min = di_norm.min(-1, keepdim=True).values.min(-2, keepdim=True).values
        di_max = di_norm.max(-1, keepdim=True).values.max(-2, keepdim=True).values
        di_norm = ((di_norm - di_min) / (di_max - di_min + 1e-16)).sum(1) * 2
        isValidEdges = di_norm > self.thresh
        dp_valid = dp.abs().sum(1) * isValidEdges
        di_valid = di_norm * isValidEdges
        # Cross entropy between image edges and probability edges.
        return -(di_valid * torch.log(dp_valid + 1e-16)).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
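# Minimal usage sketch (assumption: illustrative only, not part of the original
# module). Per the docstring, logits must be softmaxed before the loss call.
if __name__ == "__main__":
    loss_fn = SmoothContourLoss(sigma=2, weights=[1, 1], thresh=1.0, margin=1)
    logits, image = get_inputs()
    loss = loss_fn(logits.softmax(1), image)
    print(loss.item())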
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_min_pow_sub_0(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp33 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp38 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp42 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp46 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp1 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = triton_helpers.minimum(tmp3, tmp6)
tmp9 = tmp4 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = triton_helpers.minimum(tmp7, tmp10)
tmp14 = tmp12 - tmp13
tmp15 = tmp14 * tmp14
tmp17 = tmp13 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = triton_helpers.minimum(tmp15, tmp18)
tmp21 = tmp16 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = triton_helpers.minimum(tmp19, tmp22)
tmp24 = triton_helpers.minimum(tmp11, tmp23)
tmp27 = tmp25 - tmp26
tmp28 = tmp27 * tmp27
tmp30 = tmp26 - tmp29
tmp31 = tmp30 * tmp30
tmp32 = triton_helpers.minimum(tmp28, tmp31)
tmp34 = tmp29 - tmp33
tmp35 = tmp34 * tmp34
tmp36 = triton_helpers.minimum(tmp32, tmp35)
tmp37 = triton_helpers.minimum(tmp24, tmp36)
tmp40 = tmp38 - tmp39
tmp41 = tmp40 * tmp40
tmp43 = tmp39 - tmp42
tmp44 = tmp43 * tmp43
tmp45 = triton_helpers.minimum(tmp41, tmp44)
tmp47 = tmp42 - tmp46
tmp48 = tmp47 * tmp47
tmp49 = triton_helpers.minimum(tmp45, tmp48)
tmp50 = triton_helpers.minimum(tmp37, tmp49)
tmp51 = triton_helpers.maximum(tmp3, tmp6)
tmp52 = triton_helpers.maximum(tmp51, tmp10)
tmp53 = triton_helpers.maximum(tmp15, tmp18)
tmp54 = triton_helpers.maximum(tmp53, tmp22)
tmp55 = triton_helpers.maximum(tmp52, tmp54)
tmp56 = triton_helpers.maximum(tmp28, tmp31)
tmp57 = triton_helpers.maximum(tmp56, tmp35)
tmp58 = triton_helpers.maximum(tmp55, tmp57)
tmp59 = triton_helpers.maximum(tmp41, tmp44)
tmp60 = triton_helpers.maximum(tmp59, tmp48)
tmp61 = triton_helpers.maximum(tmp58, tmp60)
tmp62 = tmp0 - tmp12
tmp63 = tmp62 * tmp62
tmp64 = tmp1 - tmp13
tmp65 = tmp64 * tmp64
tmp66 = triton_helpers.minimum(tmp63, tmp65)
tmp67 = tmp4 - tmp16
tmp68 = tmp67 * tmp67
tmp69 = triton_helpers.minimum(tmp66, tmp68)
tmp70 = tmp8 - tmp20
tmp71 = tmp70 * tmp70
tmp72 = triton_helpers.minimum(tmp69, tmp71)
tmp73 = tmp12 - tmp25
tmp74 = tmp73 * tmp73
tmp75 = tmp13 - tmp26
tmp76 = tmp75 * tmp75
tmp77 = triton_helpers.minimum(tmp74, tmp76)
tmp78 = tmp16 - tmp29
tmp79 = tmp78 * tmp78
tmp80 = triton_helpers.minimum(tmp77, tmp79)
tmp81 = tmp20 - tmp33
tmp82 = tmp81 * tmp81
tmp83 = triton_helpers.minimum(tmp80, tmp82)
tmp84 = triton_helpers.minimum(tmp72, tmp83)
tmp85 = tmp25 - tmp38
tmp86 = tmp85 * tmp85
tmp87 = tmp26 - tmp39
tmp88 = tmp87 * tmp87
tmp89 = triton_helpers.minimum(tmp86, tmp88)
tmp90 = tmp29 - tmp42
tmp91 = tmp90 * tmp90
tmp92 = triton_helpers.minimum(tmp89, tmp91)
tmp93 = tmp33 - tmp46
tmp94 = tmp93 * tmp93
tmp95 = triton_helpers.minimum(tmp92, tmp94)
tmp96 = triton_helpers.minimum(tmp84, tmp95)
tmp97 = triton_helpers.maximum(tmp63, tmp65)
tmp98 = triton_helpers.maximum(tmp97, tmp68)
tmp99 = triton_helpers.maximum(tmp98, tmp71)
tmp100 = triton_helpers.maximum(tmp74, tmp76)
tmp101 = triton_helpers.maximum(tmp100, tmp79)
tmp102 = triton_helpers.maximum(tmp101, tmp82)
tmp103 = triton_helpers.maximum(tmp99, tmp102)
tmp104 = triton_helpers.maximum(tmp86, tmp88)
tmp105 = triton_helpers.maximum(tmp104, tmp91)
tmp106 = triton_helpers.maximum(tmp105, tmp94)
tmp107 = triton_helpers.maximum(tmp103, tmp106)
tl.store(out_ptr0 + x0, tmp50, xmask)
tl.store(out_ptr1 + x0, tmp61, xmask)
tl.store(out_ptr2 + x0, tmp96, xmask)
tl.store(out_ptr3 + x0, tmp107, xmask)
@triton.jit
def triton_per_fused_abs_add_div_exp_gt_log_mean_mul_neg_pow_sub_sum_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
xnumel, rnumel, XBLOCK: tl.constexpr):
rnumel = 48
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 3
r1 = rindex // 3 % 4
r2 = rindex // 12
r4 = rindex % 12
tmp0 = tl.load(in_ptr0 + (r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (1 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (17 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp10 = tl.load(in_ptr0 + (33 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp15 = tl.load(in_ptr0 + (49 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp19 = tl.load(in_ptr1 + (r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (1 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp23 = tl.load(in_ptr1 + (16 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (17 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp28 = tl.load(in_ptr1 + (32 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp29 = tl.load(in_ptr1 + (33 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp33 = tl.load(in_ptr1 + (48 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp34 = tl.load(in_ptr1 + (49 + r0 + 4 * r1 + 64 * r2), rmask, other=0.0)
tmp43 = tl.load(in_ptr2 + 4 * r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp45 = tl.load(in_ptr3 + 4 * r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp50 = tl.load(in_ptr2 + (1 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp52 = tl.load(in_ptr3 + (1 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp57 = tl.load(in_ptr2 + (2 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp59 = tl.load(in_ptr3 + (2 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp64 = tl.load(in_ptr2 + (3 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp66 = tl.load(in_ptr3 + (3 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp89 = tl.load(in_ptr0 + (r4 + 64 * r2), rmask, other=0.0)
tmp90 = tl.load(in_ptr0 + (4 + r4 + 64 * r2), rmask, other=0.0)
tmp93 = tl.load(in_ptr0 + (16 + r4 + 64 * r2), rmask, other=0.0)
tmp94 = tl.load(in_ptr0 + (20 + r4 + 64 * r2), rmask, other=0.0)
tmp98 = tl.load(in_ptr0 + (32 + r4 + 64 * r2), rmask, other=0.0)
tmp99 = tl.load(in_ptr0 + (36 + r4 + 64 * r2), rmask, other=0.0)
tmp103 = tl.load(in_ptr0 + (48 + r4 + 64 * r2), rmask, other=0.0)
tmp104 = tl.load(in_ptr0 + (52 + r4 + 64 * r2), rmask, other=0.0)
tmp108 = tl.load(in_ptr1 + (r4 + 64 * r2), rmask, other=0.0)
tmp109 = tl.load(in_ptr1 + (4 + r4 + 64 * r2), rmask, other=0.0)
tmp112 = tl.load(in_ptr1 + (16 + r4 + 64 * r2), rmask, other=0.0)
tmp113 = tl.load(in_ptr1 + (20 + r4 + 64 * r2), rmask, other=0.0)
tmp117 = tl.load(in_ptr1 + (32 + r4 + 64 * r2), rmask, other=0.0)
tmp118 = tl.load(in_ptr1 + (36 + r4 + 64 * r2), rmask, other=0.0)
tmp122 = tl.load(in_ptr1 + (48 + r4 + 64 * r2), rmask, other=0.0)
tmp123 = tl.load(in_ptr1 + (52 + r4 + 64 * r2), rmask, other=0.0)
tmp131 = tl.load(in_ptr4 + 4 * r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp133 = tl.load(in_ptr5 + 4 * r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp137 = tl.load(in_ptr4 + (1 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp139 = tl.load(in_ptr5 + (1 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp144 = tl.load(in_ptr4 + (2 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp146 = tl.load(in_ptr5 + (2 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp151 = tl.load(in_ptr4 + (3 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp153 = tl.load(in_ptr5 + (3 + 4 * r2), rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tl_math.abs(tmp11)
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = tmp13 + tmp17
tmp21 = tmp19 - tmp20
tmp22 = tmp21 * tmp21
tmp25 = tmp23 - tmp24
tmp26 = tmp25 * tmp25
tmp27 = tmp22 + tmp26
tmp30 = tmp28 - tmp29
tmp31 = tmp30 * tmp30
tmp32 = tmp27 + tmp31
tmp35 = tmp33 - tmp34
tmp36 = tmp35 * tmp35
tmp37 = tmp32 + tmp36
tmp38 = -tmp37
tmp39 = 0.125
tmp40 = tmp38 * tmp39
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp18 * tmp41
tmp44 = tmp22 - tmp43
tmp46 = tmp45 - tmp43
tmp47 = 1e-16
tmp48 = tmp46 + tmp47
tmp49 = tmp44 / tmp48
tmp51 = tmp26 - tmp50
tmp53 = tmp52 - tmp50
tmp54 = tmp53 + tmp47
tmp55 = tmp51 / tmp54
tmp56 = tmp49 + tmp55
tmp58 = tmp31 - tmp57
tmp60 = tmp59 - tmp57
tmp61 = tmp60 + tmp47
tmp62 = tmp58 / tmp61
tmp63 = tmp56 + tmp62
tmp65 = tmp36 - tmp64
tmp67 = tmp66 - tmp64
tmp68 = tmp67 + tmp47
tmp69 = tmp65 / tmp68
tmp70 = tmp63 + tmp69
tmp71 = 2.0
tmp72 = tmp70 * tmp71
tmp73 = 1.0
tmp74 = tmp72 > tmp73
tmp75 = tmp74.to(tl.float32)
tmp76 = tmp18 * tmp75
tmp77 = tmp72 * tmp75
tmp78 = tmp76 + tmp47
tmp79 = tl_math.log(tmp78)
tmp80 = tmp77 * tmp79
tmp81 = tl.broadcast_to(tmp80, [XBLOCK, RBLOCK])
tmp83 = tl.where(rmask, tmp81, 0)
tmp84 = tl.sum(tmp83, 1)[:, None]
tmp85 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK])
tmp87 = tl.where(rmask, tmp85, 0)
tmp88 = tl.sum(tmp87, 1)[:, None]
tmp91 = tmp89 - tmp90
tmp92 = tl_math.abs(tmp91)
tmp95 = tmp93 - tmp94
tmp96 = tl_math.abs(tmp95)
tmp97 = tmp92 + tmp96
tmp100 = tmp98 - tmp99
tmp101 = tl_math.abs(tmp100)
tmp102 = tmp97 + tmp101
tmp105 = tmp103 - tmp104
tmp106 = tl_math.abs(tmp105)
tmp107 = tmp102 + tmp106
tmp110 = tmp108 - tmp109
tmp111 = tmp110 * tmp110
tmp114 = tmp112 - tmp113
tmp115 = tmp114 * tmp114
tmp116 = tmp111 + tmp115
tmp119 = tmp117 - tmp118
tmp120 = tmp119 * tmp119
tmp121 = tmp116 + tmp120
tmp124 = tmp122 - tmp123
tmp125 = tmp124 * tmp124
tmp126 = tmp121 + tmp125
tmp127 = -tmp126
tmp128 = tmp127 * tmp39
tmp129 = tl_math.exp(tmp128)
tmp130 = tmp107 * tmp129
tmp132 = tmp111 - tmp131
tmp134 = tmp133 - tmp131
tmp135 = tmp134 + tmp47
tmp136 = tmp132 / tmp135
tmp138 = tmp115 - tmp137
tmp140 = tmp139 - tmp137
tmp141 = tmp140 + tmp47
tmp142 = tmp138 / tmp141
tmp143 = tmp136 + tmp142
tmp145 = tmp120 - tmp144
tmp147 = tmp146 - tmp144
tmp148 = tmp147 + tmp47
tmp149 = tmp145 / tmp148
tmp150 = tmp143 + tmp149
tmp152 = tmp125 - tmp151
tmp154 = tmp153 - tmp151
tmp155 = tmp154 + tmp47
tmp156 = tmp152 / tmp155
tmp157 = tmp150 + tmp156
tmp158 = tmp157 * tmp71
tmp159 = tmp158 > tmp73
tmp160 = tmp159.to(tl.float32)
tmp161 = tmp107 * tmp160
tmp162 = tmp158 * tmp160
tmp163 = tmp161 + tmp47
tmp164 = tl_math.log(tmp163)
tmp165 = tmp162 * tmp164
tmp166 = tl.broadcast_to(tmp165, [XBLOCK, RBLOCK])
tmp168 = tl.where(rmask, tmp166, 0)
tmp169 = tl.sum(tmp168, 1)[:, None]
tmp170 = tl.broadcast_to(tmp130, [XBLOCK, RBLOCK])
tmp172 = tl.where(rmask, tmp170, 0)
tmp173 = tl.sum(tmp172, 1)[:, None]
tmp174 = 48.0
tmp175 = tmp88 / tmp174
tmp176 = 0.0
tmp177 = tmp175 + tmp176
tmp178 = tmp173 / tmp174
tmp179 = tmp177 + tmp178
tmp180 = tmp179 * tmp73
tmp181 = tmp84 / tmp174
tmp182 = -tmp181
tmp183 = tmp182 + tmp176
tmp184 = tmp169 / tmp174
tmp185 = -tmp184
tmp186 = tmp183 + tmp185
tmp187 = tmp186 * tmp73
tmp188 = tmp180 + tmp187
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp188, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_min_pow_sub_0[grid(16)](arg1_1, buf0, buf1,
buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((), (), torch.float32)
buf14 = buf10
del buf10
triton_per_fused_abs_add_div_exp_gt_log_mean_mul_neg_pow_sub_sum_1[grid
(1)](buf14, arg0_1, arg1_1, buf0, buf1, buf2, buf3, 1, 48,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del buf0
del buf1
del buf2
del buf3
return buf14,
class SmoothContourLossNew(nn.Module):
"""
    Loss function that combines a smoothness loss derived from ss-with-RIM
    and a contour-aware loss.
    The smoothness loss concerns the smoothness of local patterns, while the
    contour-aware loss is interested in whether two patches are divided.
    Cross entropy (or KL divergence) is applied to calculate the contour-aware
    loss. When calculating the gradients of the probability, i.e. dp, and
    those of the image, i.e. di, we want the distribution of dp to
    approximate that of di.
Args:
logits: torch.tensor
A trainable tensor of shape (b, nSpixel, h, w)
It should be softmaxed before calling this loss function.
image: torch.tensor
            A tensor derived from the color channels of the input, with
            shape (b, c, h, w)
        sigma: float
            Parameter of the transformed Gaussian kernel size
        weights: List[float]
            A list containing 2 coefficients that control the amplitudes of
            the 2 losses
        thresh: float
            Parameter for controlling the amplitude of edges
        margin: int
            Parameter for the edge width (used as the finite-difference
            stride)
"""
def __init__(self, sigma=2, weights=[1, 1], thresh=1.0, margin=1):
super().__init__()
self.sigma = 2 * sigma ** 2
self.weights = weights
self.thresh = thresh
self.margin = margin
    def get_gradients(self, logits, image):
        # Finite differences along x and y, using self.margin as the stride.
dp_dx = logits[..., :-self.margin] - logits[..., self.margin:]
dp_dy = logits[..., :-self.margin, :] - logits[..., self.margin:, :]
di_dx = image[..., :-self.margin] - image[..., self.margin:]
di_dy = image[..., :-self.margin, :] - image[..., self.margin:, :]
return [dp_dx, dp_dy], [di_dx, di_dy]
    def smooth_loss(self, dp, di):
        # Gaussian-weighted total variation: strong image gradients (di)
        # suppress the smoothness penalty on the probability gradients (dp).
        return (dp.abs().sum(1) * (-di.pow(2).sum(1) / self.sigma).exp()).mean()
    def contour_loss(self, dp, di):
        # Min-max normalise the squared image gradients over each (h, w) map,
        # then keep only locations whose edge response exceeds the threshold.
        di_norm = di.pow(2)
        di_min = di_norm.min(-1, keepdim=True).values.min(-2, keepdim=True).values
        di_max = di_norm.max(-1, keepdim=True).values.max(-2, keepdim=True).values
        di_norm = ((di_norm - di_min) / (di_max - di_min + 1e-16)).sum(1) * 2
        isValidEdges = di_norm > self.thresh
        dp_valid = dp.abs().sum(1) * isValidEdges
        di_valid = di_norm * isValidEdges
        # Cross entropy between image edges and probability edges.
        return -(di_valid * torch.log(dp_valid + 1e-16)).mean()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
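# Minimal usage sketch (assumption: illustrative only; call() launches Triton
# kernels compiled for CUDA, so a GPU is required).
if __name__ == "__main__":
    model = SmoothContourLossNew()
    logits = torch.rand(4, 4, 4, 4, device="cuda")
    image = torch.rand(4, 4, 4, 4, device="cuda")
    print(model(logits, image).item())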
|
yueyu-stu/EdgeAwareSpixel
|
SmoothContourLoss
| false | 11,071 |
[
"MIT"
] | 0 |
f7f9fcb15bfa8e31bd4ad9473f9058c44a8391d7
|
https://github.com/yueyu-stu/EdgeAwareSpixel/tree/f7f9fcb15bfa8e31bd4ad9473f9058c44a8391d7
|
MaskedThing
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/nj/cnjpf6ikyusruwalragudnfd5g3oqe2yhq2iop3zz6xqijup5365.py
# Topologically Sorted Source Nodes: [relu, pred, pred_1, relu_1, target, target_1, mse_loss], Original ATen: [aten.relu, aten.log1p, aten.mul, aten.mse_loss]
# Source node to ATen node mapping:
# mse_loss => mean, pow_1, sub
# pred => log1p
# pred_1 => mul
# relu => relu
# relu_1 => relu_1
# target => log1p_1
# target_1 => mul_1
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%relu,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log1p, %arg2_1), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg1_1,), kwargs = {})
# %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%relu_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log1p_1, %arg2_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_log1p_mse_loss_mul_relu_0 = async_compile.triton('triton_per_fused_log1p_mse_loss_mul_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_log1p_mse_loss_mul_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_log1p_mse_loss_mul_relu_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp4 = tl.load(in_ptr1 + (r0), None)
tmp6 = tl.load(in_ptr2 + (r0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = libdevice.log1p(tmp2)
tmp5 = tmp3 * tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = libdevice.log1p(tmp7)
tmp9 = tmp8 * tmp4
tmp10 = tmp5 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [relu, pred, pred_1, relu_1, target, target_1, mse_loss], Original ATen: [aten.relu, aten.log1p, aten.mul, aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_log1p_mse_loss_mul_relu_0.run(buf1, arg0_1, arg2_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MaskedThing(nn.Module):
l1 = nn.L1Loss()
mse = nn.MSELoss()
    def forward(self, pred, target, mask):
        # Compress the dynamic range with log1p(relu(.)), gate both tensors
        # with the same mask, then compare them with MSE.
pred = torch.log1p(F.relu(pred))
target = torch.log1p(F.relu(target))
pred = torch.mul(pred, mask)
target = torch.mul(target, mask)
return self.mse(pred, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
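# Minimal usage sketch (assumption: illustrative only, not part of the original
# module).
if __name__ == "__main__":
    loss_fn = MaskedThing()
    pred, target, mask = get_inputs()
    print(loss_fn(pred, target, mask).item())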
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_log1p_mse_loss_mul_relu_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = libdevice.log1p(tmp2)
tmp5 = tmp3 * tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = libdevice.log1p(tmp7)
tmp9 = tmp8 * tmp4
tmp10 = tmp5 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_log1p_mse_loss_mul_relu_0[grid(1)](buf1, arg0_1,
arg2_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class MaskedThingNew(nn.Module):
l1 = nn.L1Loss()
mse = nn.MSELoss()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
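# Minimal usage sketch (assumption: illustrative only; call() launches a CUDA
# Triton kernel, so a GPU is required).
if __name__ == "__main__":
    model = MaskedThingNew()
    pred, target, mask = [torch.rand(4, 4, 4, 4, device="cuda") for _ in range(3)]
    print(model(pred, target, mask).item())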
|
vegetablejuiceftw/soft-pointer-networks
|
MaskedThing
| false | 11,072 |
[
"MIT"
] | 0 |
9705d9688b6b69db3948172771df4c367165c948
|
https://github.com/vegetablejuiceftw/soft-pointer-networks/tree/9705d9688b6b69db3948172771df4c367165c948
|
PositionMSELoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/3b/c3b5hpcehwuslopeqy4ibldnps7dvkhasgmwug5db4wabsopolir.py
# Topologically Sorted Source Nodes: [pred, mse_loss], Original ATen: [aten.mul, aten.mse_loss]
# Source node to ATen node mapping:
# mse_loss => mean, pow_1, sub
# pred => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %unsqueeze), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %expand), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_mse_loss_mul_0 = async_compile.triton('triton_per_fused_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r4 = rindex % 256
r0 = rindex % 16
r5 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r4), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r0 + (16*r5)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (r4), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 1024.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [pred, mse_loss], Original ATen: [aten.mul, aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_mse_loss_mul_0.run(buf1, arg1_1, arg0_1, arg2_1, 1, 1024, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class PositionMSELoss(nn.Module):
mse = nn.MSELoss()
def forward(self, pred, target, mask):
pred = torch.mul(pred, mask.unsqueeze(2))
return self.mse(pred, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
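A minimal shape sketch (using the shapes from get_inputs above) of why the fused kernel reduces over rnumel = 1024 rather than 256: mask.unsqueeze(2) promotes the product to five dimensions, and MSELoss then broadcasts the 4-D target against it, which is what the %expand node in the graph records.

import torch

pred = torch.rand(4, 4, 4, 4)
mask = torch.rand(4, 4, 4, 4)
masked = torch.mul(pred, mask.unsqueeze(2))   # [4,4,4,4] broadcast with [4,4,1,4,4]
assert masked.shape == (4, 4, 4, 4, 4)        # 4 ** 5 == 1024 elements
# matches the kernel's rnumel = 1024 and the final division by 1024.0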
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r4 = rindex % 256
r0 = rindex % 16
r5 = rindex // 64
tmp0 = tl.load(in_ptr0 + r4, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r0 + 16 * r5), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + r4, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 1024.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1,
arg2_1, 1, 1024, num_warps=8, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class PositionMSELossNew(nn.Module):
mse = nn.MSELoss()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
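Reading the index arithmetic in the kernel (in_ptr1 = arg0_1 is loaded at r0 + 16 * r5, the unsqueeze-broadcast pattern), arg0_1 appears to play the mask role, so the wrapper's positional inputs follow the traced placeholder order rather than the eager (pred, target, mask) signature. A hypothetical smoke test, assuming a CUDA device since call() pins all buffers to cuda:0:

import torch

module = PositionMSELossNew()
xs = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3)]
loss = module(*xs)   # 0-dim float32 tensor on cuda:0
assert loss.shape == () and loss.device.type == 'cuda'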
|
vegetablejuiceftw/soft-pointer-networks
|
PositionMSELoss
| false | 11,073 |
[
"MIT"
] | 0 |
9705d9688b6b69db3948172771df4c367165c948
|
https://github.com/vegetablejuiceftw/soft-pointer-networks/tree/9705d9688b6b69db3948172771df4c367165c948
|
CosineLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/wk/cwktc7f6njmtgku5w5mzmcejslaxwwvrrkidq37gttzbhq2zct7v.py
# Topologically Sorted Source Nodes: [pred, cosine_similarity, sub, mean], Original ATen: [aten.mul, aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.sum, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# cosine_similarity => clamp_min, clamp_min_1, div, div_1, mul_1, pow_1, pow_2, pow_3, pow_4, sum_1, sum_2, sum_3
# mean => mean
# pred => mul
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %unsqueeze), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%expand_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-06), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%expand_1, %clamp_min), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%expand, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [2], True), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%expand, %clamp_min_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sum_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
triton_per_fused_clamp_min_div_linalg_vector_norm_mean_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_clamp_min_div_linalg_vector_norm_mean_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_min_div_linalg_vector_norm_mean_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': True, 'num_load': 9, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_min_div_linalg_vector_norm_mean_mul_rsub_sum_0(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16) % 4
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r3), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (r0 + (64*r1)), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (16 + r0 + (64*r1)), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (32 + r0 + (64*r1)), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (48 + r0 + (64*r1)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp9
tmp11 = tmp7 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = 1e-06
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = tmp2 / tmp18
tmp21 = tmp20 * tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp29 = tmp28 * tmp28
tmp30 = tmp27 + tmp29
tmp31 = libdevice.sqrt(tmp30)
tmp32 = triton_helpers.maximum(tmp31, tmp17)
tmp33 = tmp20 / tmp32
tmp34 = tmp19 * tmp33
tmp35 = tmp5 / tmp18
tmp36 = tmp22 / tmp32
tmp37 = tmp35 * tmp36
tmp38 = tmp34 + tmp37
tmp39 = tmp9 / tmp18
tmp40 = tmp25 / tmp32
tmp41 = tmp39 * tmp40
tmp42 = tmp38 + tmp41
tmp43 = tmp13 / tmp18
tmp44 = tmp28 / tmp32
tmp45 = tmp43 * tmp44
tmp46 = tmp42 + tmp45
tmp47 = 1.0
tmp48 = tmp47 - tmp46
tmp49 = tl.broadcast_to(tmp48, [RBLOCK])
tmp51 = triton_helpers.promote_to_tensor(tl.sum(tmp49, 0))
tmp52 = 256.0
tmp53 = tmp51 / tmp52
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp53, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [pred, cosine_similarity, sub, mean], Original ATen: [aten.mul, aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.sum, aten.rsub, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_clamp_min_div_linalg_vector_norm_mean_mul_rsub_sum_0.run(buf3, arg1_1, arg0_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class CosineLoss(nn.Module):
cos = nn.CosineSimilarity(dim=2, eps=1e-06)
def forward(self, pred, target, mask):
pred = torch.mul(pred, mask.unsqueeze(2))
return (1.0 - self.cos(pred, target)).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
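The fused kernel unrolls the size-4 reduction over dim=2 (the four loads at offsets 0, 16, 32 and 48, stride 16), clamps both norms at 1e-06 and averages 1 - cos over the 256 output positions. A minimal eager sketch of the same normalize-then-dot order, assuming the get_inputs shapes:

import torch

def cosine_loss_reference(pred, target, mask, eps=1e-06):
    p = torch.mul(pred, mask.unsqueeze(2))                  # broadcast to [4,4,4,4,4]
    t = target.expand(p.shape)                              # the %expand node
    pn = p / p.norm(dim=2, keepdim=True).clamp_min(eps)     # tmp19 = tmp2 / tmp18
    tn = t / t.norm(dim=2, keepdim=True).clamp_min(eps)     # tmp33 = tmp20 / tmp32
    return (1.0 - (pn * tn).sum(dim=2)).mean()              # tmp48 ... tmp53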
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_clamp_min_div_linalg_vector_norm_mean_mul_rsub_sum_0(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16 % 4
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + r3, None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + (r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp9
tmp11 = tmp7 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = 1e-06
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = tmp2 / tmp18
tmp21 = tmp20 * tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp29 = tmp28 * tmp28
tmp30 = tmp27 + tmp29
tmp31 = libdevice.sqrt(tmp30)
tmp32 = triton_helpers.maximum(tmp31, tmp17)
tmp33 = tmp20 / tmp32
tmp34 = tmp19 * tmp33
tmp35 = tmp5 / tmp18
tmp36 = tmp22 / tmp32
tmp37 = tmp35 * tmp36
tmp38 = tmp34 + tmp37
tmp39 = tmp9 / tmp18
tmp40 = tmp25 / tmp32
tmp41 = tmp39 * tmp40
tmp42 = tmp38 + tmp41
tmp43 = tmp13 / tmp18
tmp44 = tmp28 / tmp32
tmp45 = tmp43 * tmp44
tmp46 = tmp42 + tmp45
tmp47 = 1.0
tmp48 = tmp47 - tmp46
tmp49 = tl.broadcast_to(tmp48, [RBLOCK])
tmp51 = triton_helpers.promote_to_tensor(tl.sum(tmp49, 0))
tmp52 = 256.0
tmp53 = tmp51 / tmp52
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp53, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_clamp_min_div_linalg_vector_norm_mean_mul_rsub_sum_0[
grid(1)](buf3, arg1_1, arg0_1, arg2_1, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf3,
class CosineLossNew(nn.Module):
cos = nn.CosineSimilarity(dim=2, eps=1e-06)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
vegetablejuiceftw/soft-pointer-networks
|
CosineLoss
| false | 11,074 |
[
"MIT"
] | 0 |
9705d9688b6b69db3948172771df4c367165c948
|
https://github.com/vegetablejuiceftw/soft-pointer-networks/tree/9705d9688b6b69db3948172771df4c367165c948
|
MaskedLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/uq/cuqyuqsmjlbzrgrzegltdorx3hip5v6t3qklidxuwuuvfjnpakbb.py
# Topologically Sorted Source Nodes: [mse_loss], Original ATen: [aten.mse_loss]
# Source node to ATen node mapping:
# mse_loss => mean, pow_1, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute_1, %view_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_mse_loss_0 = async_compile.triton('triton_per_fused_mse_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp4 = tl.load(in_ptr2 + (r0), None)
tmp2 = libdevice.log1p(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = libdevice.log1p(tmp4)
tmp6 = tmp3 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mse_loss], Original ATen: [aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_mse_loss_0.run(buf1, arg2_1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class MaskedLoss(nn.Module):
mse = nn.MSELoss()
def forward(self, pred, target, mask):
pred = torch.log1p(pred).contiguous().view(-1)
target = torch.log1p(target).contiguous().view(-1)
mask = mask.view(-1)
pred = (mask * pred.T).T
return self.mse(pred, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
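Because every tensor is flattened by view(-1) before the multiply, both .T transposes are identity operations on 1-D tensors, which is why the kernel above is a flat 256-element reduction of mean((mask * log1p(pred) - log1p(target)) ** 2). A short sketch under the assumed get_inputs shapes:

import torch

pred, target, mask = (torch.rand(4, 4, 4, 4) for _ in range(3))
p = torch.log1p(pred).view(-1)
t = torch.log1p(target).view(-1)
m = mask.view(-1)
assert torch.equal((m * p.T).T, m * p)   # .T is a no-op on 1-D tensors
loss = ((m * p - t) ** 2).mean()         # what the fused kernel computes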
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp4 = tl.load(in_ptr2 + r0, None)
tmp2 = libdevice.log1p(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = libdevice.log1p(tmp4)
tmp6 = tmp3 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_0[grid(1)](buf1, arg2_1, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class MaskedLossNew(nn.Module):
mse = nn.MSELoss()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
vegetablejuiceftw/soft-pointer-networks
|
MaskedLoss
| false | 11,075 |
[
"MIT"
] | 0 |
9705d9688b6b69db3948172771df4c367165c948
|
https://github.com/vegetablejuiceftw/soft-pointer-networks/tree/9705d9688b6b69db3948172771df4c367165c948
|
MaskedSoftL1
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/al/calnzjzudg6lw6ndj4m5steab3eywole5zxaeybewjihk4n7ochl.py
# Topologically Sorted Source Nodes: [pred, truediv, truediv_1, smooth_l1_loss], Original ATen: [aten.mul, aten.div, aten.smooth_l1_loss]
# Source node to ATen node mapping:
# pred => mul
# smooth_l1_loss => abs_1, div_2, lt, mean, mul_1, pow_1, sub, sub_1, where
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 5), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg2_1, 5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {})
# %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div_2, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {})
triton_per_fused_div_mul_smooth_l1_loss_0 = async_compile.triton('triton_per_fused_div_mul_smooth_l1_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_smooth_l1_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp5 = tl.load(in_ptr2 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp4 - tmp6
tmp8 = tl_math.abs(tmp7)
tmp9 = 1.0
tmp10 = tmp8 < tmp9
tmp11 = tmp8 * tmp8
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp13 * tmp9
tmp15 = tmp8 - tmp12
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 256.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [pred, truediv, truediv_1, smooth_l1_loss], Original ATen: [aten.mul, aten.div, aten.smooth_l1_loss]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_smooth_l1_loss_0.run(buf1, arg1_1, arg0_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class MaskedSoftL1(nn.Module):
loss = nn.SmoothL1Loss()
def __init__(self, factor=5):
super().__init__()
self.factor = factor
def forward(self, pred, target, mask):
pred = torch.mul(pred, mask)
return self.loss(pred / self.factor, target / self.factor)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
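In the kernel the division by factor = 5 is constant-folded into a multiply by 0.2 (tmp3), and SmoothL1Loss with its default beta = 1.0 becomes the piecewise select in tmp10..tmp16: 0.5 * x ** 2 where |x| < 1, otherwise |x| - 0.5. A hedged eager sketch of the fused computation:

import torch

def masked_soft_l1_reference(pred, target, mask, factor=5):
    x = (pred * mask) / factor - target / factor        # tmp4 - tmp6
    ax = x.abs()                                        # tmp8
    # default SmoothL1 (beta = 1.0): quadratic inside the unit interval
    return torch.where(ax < 1.0, 0.5 * ax * ax, ax - 0.5).mean()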
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp5 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp4 - tmp6
tmp8 = tl_math.abs(tmp7)
tmp9 = 1.0
tmp10 = tmp8 < tmp9
tmp11 = tmp8 * tmp8
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp13 * tmp9
tmp15 = tmp8 - tmp12
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 256.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_smooth_l1_loss_0[grid(1)](buf1, arg1_1,
arg0_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class MaskedSoftL1New(nn.Module):
loss = nn.SmoothL1Loss()
def __init__(self, factor=5):
super().__init__()
self.factor = factor
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
vegetablejuiceftw/soft-pointer-networks
|
MaskedSoftL1
| false | 11,076 |
[
"MIT"
] | 0 |
9705d9688b6b69db3948172771df4c367165c948
|
https://github.com/vegetablejuiceftw/soft-pointer-networks/tree/9705d9688b6b69db3948172771df4c367165c948
|
MaskedMSE
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/gj/cgjvbtnuirnhhesk44woixofw5axnk7m632jg3pmfhc4n645po6o.py
# Topologically Sorted Source Nodes: [pred, mse_loss], Original ATen: [aten.mul, aten.mse_loss]
# Source node to ATen node mapping:
# mse_loss => mean, pow_1, sub
# pred => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %arg2_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_mse_loss_mul_0 = async_compile.triton('triton_per_fused_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp3 = tl.load(in_ptr2 + (r0), None)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [pred, mse_loss], Original ATen: [aten.mul, aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_mse_loss_mul_0.run(buf1, arg1_1, arg0_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class MaskedMSE(nn.Module):
mse = nn.MSELoss()
def forward(self, pred, target, mask):
pred = torch.mul(pred, mask)
return self.mse(pred, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp3 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1,
arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class MaskedMSENew(nn.Module):
mse = nn.MSELoss()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
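A hypothetical smoke test for the compiled wrapper; CUDA is required because call() pins its buffers to cuda:0. Note that input_0..input_2 follow the traced placeholder order (the graph multiplies arg1_1 by arg0_1 and subtracts arg2_1), which is not guaranteed to match the eager (pred, target, mask) signature.

import torch

m = MaskedMSENew()
a, b, c = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
out = m(a, b, c)   # scalar: mean((b * a - c) ** 2)
assert out.shape == () and out.dtype == torch.float32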
|
vegetablejuiceftw/soft-pointer-networks
|
MaskedMSE
| false | 11,077 |
[
"MIT"
] | 0 |
9705d9688b6b69db3948172771df4c367165c948
|
https://github.com/vegetablejuiceftw/soft-pointer-networks/tree/9705d9688b6b69db3948172771df4c367165c948
|
Squash
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ym/cym3ccudngmvatoe6w7myo62gop3lu5z7s5oobh4dsa5ywnpodvj.py
# Topologically Sorted Source Nodes: [pow_1, s2, add, truediv, add_1, sqrt, truediv_1, mul], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# pow_1 => pow_1
# s2 => sum_1
# sqrt => sqrt
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_1 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {})
triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = 1e-08
tmp16 = tmp10 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp14 / tmp17
tmp19 = tmp13 * tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s2, add, truediv, add_1, sqrt, truediv_1, mul], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
import torch.utils.data
import torch.nn.functional
import torch.autograd
class Squash(Module):
    r"""
    ## Squash

    This is the **squashing** function from the paper, given by equation $(1)$:

    $$\mathbf{v}_j = \frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}
    \frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$$

    $\frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$ normalizes the length of all the capsules, whilst
    $\frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}$ shrinks the capsules
    that have a length smaller than one.
    """
def __init__(self, epsilon=1e-08):
super().__init__()
self.epsilon = epsilon
def forward(self, s: 'torch.Tensor'):
"""
The shape of `s` is `[batch_size, n_capsules, n_features]`
"""
s2 = (s ** 2).sum(dim=-1, keepdims=True)
return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
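A small numeric check of the squashing property under the assumed get_inputs shapes: the output keeps the direction of s, while its length becomes ||s||^2 / (1 + ||s||^2) up to the epsilon term, so it always stays below one.

import torch

s = torch.rand(4, 4, 4, 4)
s2 = (s ** 2).sum(dim=-1, keepdim=True)
v = s2 / (1 + s2) * (s / torch.sqrt(s2 + 1e-08))
out_norm = v.norm(dim=-1)
assert (out_norm < 1).all()                       # squashed lengths stay below 1
expected = (s2 / (1 + s2)).squeeze(-1)
assert torch.allclose(out_norm, expected, atol=1e-4)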
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = 1e-08
tmp16 = tmp10 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp14 / tmp17
tmp19 = tmp13 * tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SquashNew(Module):
    r"""
    ## Squash

    This is the **squashing** function from the paper, given by equation $(1)$:

    $$\mathbf{v}_j = \frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}
    \frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$$

    $\frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$ normalizes the length of all the capsules, whilst
    $\frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}$ shrinks the capsules
    that have a length smaller than one.
    """
def __init__(self, epsilon=1e-08):
super().__init__()
self.epsilon = epsilon
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ppvalluri09/annotated_deep_learning_paper_implementations
|
Squash
| false | 11,078 |
[
"MIT"
] | 0 |
387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
https://github.com/ppvalluri09/annotated_deep_learning_paper_implementations/tree/387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
MarginLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/2l/c2lqz5eg4uounnzhu2xhx6kieol5nd5d676uuijy3vfi2vfxmwhd.py
# Topologically Sorted Source Nodes: [eye, labels, pow_1, sum_1, v_norm, sub, relu, mul, sub_1, mul_1, sub_2, relu_1, mul_2, loss], Original ATen: [aten.eye, aten.index, aten.pow, aten.sum, aten.sqrt, aten.rsub, aten.relu, aten.mul, aten.sub, aten.add]
# Source node to ATen node mapping:
# eye => eq, full_default, full_default_1, iota_1, where
# labels => index
# loss => add
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# relu => relu
# relu_1 => relu_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sum_1 => sum_1
# v_norm => sqrt
# Graph fragment:
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {})
# %index : [num_users=2] = call_function[target=torch.ops.aten.index.Tensor](args = (%where, [%arg1_1]), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1]), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0.9, %sqrt), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %relu), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %index), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 0.5), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sqrt, 0.1), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_2,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %relu_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {})
triton_poi_fused_add_eye_index_mul_pow_relu_rsub_sqrt_sub_sum_0 = async_compile.triton('triton_poi_fused_add_eye_index_mul_pow_relu_rsub_sqrt_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_eye_index_mul_pow_relu_rsub_sqrt_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_eye_index_mul_pow_relu_rsub_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tmp4
tmp7 = x0
tmp8 = tmp6 == tmp7
tmp9 = 1.0
tmp10 = 0.0
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp13 = tmp12 * tmp12
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 0.9
tmp25 = tmp24 - tmp23
tmp26 = tl.full([1], 0, tl.int32)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp28 = tmp11 * tmp27
tmp29 = tmp9 - tmp11
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.1
tmp33 = tmp23 - tmp32
tmp34 = triton_helpers.maximum(tmp26, tmp33)
tmp35 = tmp31 * tmp34
tmp36 = tmp28 + tmp35
tl.store(out_ptr0 + (x3), tmp36, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/nl/cnlounivewsan7neintygju4ung6fdl7sne7vd5zvvrfxzgid2yx.py
# Topologically Sorted Source Nodes: [sum_2, mean], Original ATen: [aten.sum, aten.mean]
# Source node to ATen node mapping:
# mean => mean
# sum_2 => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [-1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {})
triton_per_fused_mean_sum_1 = async_compile.triton('triton_per_fused_mean_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 16.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [eye, labels, pow_1, sum_1, v_norm, sub, relu, mul, sub_1, mul_1, sub_2, relu_1, mul_2, loss], Original ATen: [aten.eye, aten.index, aten.pow, aten.sum, aten.sqrt, aten.rsub, aten.relu, aten.mul, aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_eye_index_mul_pow_relu_rsub_sqrt_sub_sum_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sum_2, mean], Original ATen: [aten.sum, aten.mean]
triton_per_fused_mean_sum_1.run(buf2, buf0, 1, 16, grid=grid(1), stream=stream0)
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
import torch.nn.functional as F
import torch.utils.data
import torch.nn.functional
import torch.autograd
class MarginLoss(Module):
    '\n ## Margin loss for class existence\n\n A separate margin loss is used for each output capsule and the total loss is the sum of them.\n The length of each output capsule is the probability that class is present in the input.\n\n Loss for each output capsule or class $k$ is,\n $$\\mathcal{L}_k = T_k \\max(0, m^{+} - \\lVert\\mathbf{v}_k\\rVert)^2 +\n \\lambda (1 - T_k) \\max(0, \\lVert\\mathbf{v}_k\\rVert - m^{-})^2$$\n\n $T_k$ is $1$ if the class $k$ is present and $0$ otherwise.\n The first component of the loss is $0$ when the class is not present,\n and the second component is $0$ if the class is present.\n The $\\max(0, x)$ is used to avoid predictions going to extremes.\n $m^{+}$ is set to be $0.9$ and $m^{-}$ to be $0.1$ in the paper.\n\n The $\\lambda$ down-weighting is used to stop the length of all capsules from\n falling during the initial phase of training.\n '
def __init__(self, *, n_labels: int, lambda_: float=0.5, m_positive:
float=0.9, m_negative: float=0.1):
super().__init__()
self.m_negative = m_negative
self.m_positive = m_positive
self.lambda_ = lambda_
self.n_labels = n_labels
def forward(self, v: 'torch.Tensor', labels: 'torch.Tensor'):
"""
`v`, $\\mathbf{v}_j$ are the squashed output capsules.
This has shape `[batch_size, n_labels, n_features]`; that is, there is a capsule for each label.
`labels` are the labels, and has shape `[batch_size]`.
"""
v_norm = torch.sqrt((v ** 2).sum(dim=-1))
labels = torch.eye(self.n_labels, device=labels.device)[labels]
loss = labels * F.relu(self.m_positive - v_norm) + self.lambda_ * (
1.0 - labels) * F.relu(v_norm - self.m_negative)
return loss.sum(dim=-1).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'n_labels': 4}]
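A small worked example against the eager module above (an illustrative sketch; runs on CPU, and the value follows directly from the loss definition; note the implementation uses unsquared hinges, unlike the squared form in the docstring). With v = 0 every capsule norm is zero, so only the positive hinge fires for the labeled class:
import torch

loss_fn = MarginLoss(n_labels=4)
v = torch.zeros(2, 4, 16)         # [batch_size, n_labels, n_features]
labels = torch.tensor([0, 3])
# per class: T_k * relu(0.9 - 0) + 0.5 * (1 - T_k) * relu(0 - 0.1) = T_k * 0.9
print(loss_fn(v, labels).item())  # 0.9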
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_eye_index_mul_pow_relu_rsub_sqrt_sub_sum_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tmp4
tmp7 = x0
tmp8 = tmp6 == tmp7
tmp9 = 1.0
tmp10 = 0.0
tmp11 = tl.where(tmp8, tmp9, tmp10)
    # ||v_k||: L2 norm over the feature (last) dimension
    tmp13 = tmp12 * tmp12
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp21 = tmp20 * tmp20
    tmp22 = tmp19 + tmp21
    tmp23 = libdevice.sqrt(tmp22)
    # positive hinge: T_k * relu(m_positive - ||v_k||), m_positive = 0.9
    tmp24 = 0.9
    tmp25 = tmp24 - tmp23
    tmp26 = tl.full([1], 0, tl.int32)
    tmp27 = triton_helpers.maximum(tmp26, tmp25)
    tmp28 = tmp11 * tmp27
    # negative hinge: lambda * (1 - T_k) * relu(||v_k|| - m_negative)
    tmp29 = tmp9 - tmp11
    tmp30 = 0.5
    tmp31 = tmp29 * tmp30
    tmp32 = 0.1
    tmp33 = tmp23 - tmp32
    tmp34 = triton_helpers.maximum(tmp26, tmp33)
    tmp35 = tmp31 * tmp34
    tmp36 = tmp28 + tmp35
    tl.store(out_ptr0 + x3, tmp36, xmask)
@triton.jit
def triton_per_fused_mean_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 16.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_eye_index_mul_pow_relu_rsub_sqrt_sub_sum_0[grid
(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_mean_sum_1[grid(1)](buf2, buf0, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf0
return buf2,
class MarginLossNew(Module):
    '\n ## Margin loss for class existence\n\n A separate margin loss is used for each output capsule and the total loss is the sum of them.\n The length of each output capsule is the probability that class is present in the input.\n\n Loss for each output capsule or class $k$ is,\n $$\\mathcal{L}_k = T_k \\max(0, m^{+} - \\lVert\\mathbf{v}_k\\rVert)^2 +\n \\lambda (1 - T_k) \\max(0, \\lVert\\mathbf{v}_k\\rVert - m^{-})^2$$\n\n $T_k$ is $1$ if the class $k$ is present and $0$ otherwise.\n The first component of the loss is $0$ when the class is not present,\n and the second component is $0$ if the class is present.\n The $\\max(0, x)$ is used to avoid predictions going to extremes.\n $m^{+}$ is set to be $0.9$ and $m^{-}$ to be $0.1$ in the paper.\n\n The $\\lambda$ down-weighting is used to stop the length of all capsules from\n falling during the initial phase of training.\n '
def __init__(self, *, n_labels: int, lambda_: float=0.5, m_positive:
float=0.9, m_negative: float=0.1):
super().__init__()
self.m_negative = m_negative
self.m_positive = m_positive
self.lambda_ = lambda_
self.n_labels = n_labels
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
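An optional cross-check of the fused kernels against the eager module (a sketch; it assumes a CUDA device since the compiled call pins cuda:0, and reuses the shapes from get_inputs above):
import torch

if torch.cuda.is_available():
    v = torch.rand(4, 4, 4, 4, device='cuda')
    labels = torch.ones(4, dtype=torch.int64, device='cuda')
    eager = MarginLoss(n_labels=4)(v, labels)
    fused = MarginLossNew(n_labels=4)(v, labels)
    print(torch.allclose(eager, fused, atol=1e-06))  # True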
|
ppvalluri09/annotated_deep_learning_paper_implementations
|
MarginLoss
| false | 11,079 |
[
"MIT"
] | 0 |
387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
https://github.com/ppvalluri09/annotated_deep_learning_paper_implementations/tree/387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
MaskedL1
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/jx/cjxujfbiwfcrrdayvdu5vro2ewmwbp7cqumwyemtmkgxs4vrqfs5.py
# Topologically Sorted Source Nodes: [pred, target, l1_loss], Original ATen: [aten.mul, aten.sub, aten.abs, aten.mean]
# Source node to ATen node mapping:
# l1_loss => abs_1, mean, sub
# pred => mul
# target => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %arg0_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
triton_per_fused_abs_mean_mul_sub_0 = async_compile.triton('triton_per_fused_abs_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp3 = tl.load(in_ptr2 + (r0), None)
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl_math.abs(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [pred, target, l1_loss], Original ATen: [aten.mul, aten.sub, aten.abs, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_mean_mul_sub_0.run(buf1, arg1_1, arg0_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class MaskedL1(nn.Module):
l1 = nn.L1Loss()
def forward(self, pred, target, mask):
pred = torch.mul(pred, mask)
target = torch.mul(target, mask)
return self.l1(pred, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
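One behavioral detail worth illustrating (a sketch of the module exactly as written): nn.L1Loss defaults to reduction='mean' over all elements, so masked-out positions still count as zero-error terms in the denominator rather than being excluded from the average:
import torch

crit = MaskedL1()
pred = torch.ones(1, 1, 2, 2)
target = torch.zeros(1, 1, 2, 2)
mask = torch.tensor([[[[1.0, 0.0], [0.0, 0.0]]]])
# one unmasked cell with |1 - 0| = 1, averaged over all 4 cells
print(crit(pred, target, mask).item())  # 0.25, not 1.0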
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp3 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl_math.abs(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg1_1, arg0_1,
arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class MaskedL1New(nn.Module):
l1 = nn.L1Loss()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
vegetablejuiceftw/soft-pointer-networks
|
MaskedL1
| false | 11,080 |
[
"MIT"
] | 0 |
9705d9688b6b69db3948172771df4c367165c948
|
https://github.com/vegetablejuiceftw/soft-pointer-networks/tree/9705d9688b6b69db3948172771df4c367165c948
|
tfAvgPool3D
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/j4/cj47gjxddooj3qeijcb25ywjhwfl5gx75ggkyzuyl3muz5nxshah.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool3d]
# Source node to ATen node mapping:
# x => constant_pad_nd
# x_1 => avg_pool3d
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [0, 1, 0, 1], 0.0), kwargs = {})
# %avg_pool3d : [num_users=3] = call_function[target=torch.ops.aten.avg_pool3d.default](args = (%constant_pad_nd, [1, 3, 3], [1, 2, 2]), kwargs = {})
triton_poi_fused_avg_pool3d_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_avg_pool3d_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool3d_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool3d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2) % 2
x0 = xindex % 2
x3 = (xindex // 2)
x4 = xindex
tmp0 = 2*x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 2*x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((2*x0) + (8*x3)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = 1 + (2*x0)
tmp8 = tmp7 < tmp1
tmp9 = tmp2 & tmp8
tmp10 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x3)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp10 + tmp6
tmp12 = 2 + (2*x0)
tmp13 = tmp12 < tmp1
tmp14 = tmp2 & tmp13
tmp15 = tl.load(in_ptr0 + (2 + (2*x0) + (8*x3)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp15 + tmp11
tmp17 = 1 + (2*x1)
tmp18 = tmp17 < tmp1
tmp19 = tmp18 & tmp4
tmp20 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x3)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp20 + tmp16
tmp22 = tmp18 & tmp8
tmp23 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x3)), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp23 + tmp21
tmp25 = tmp18 & tmp13
tmp26 = tl.load(in_ptr0 + (6 + (2*x0) + (8*x3)), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tmp26 + tmp24
tmp28 = 2 + (2*x1)
tmp29 = tmp28 < tmp1
tmp30 = tmp29 & tmp4
tmp31 = tl.load(in_ptr0 + (8 + (2*x0) + (8*x3)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 + tmp27
tmp33 = tmp29 & tmp8
tmp34 = tl.load(in_ptr0 + (9 + (2*x0) + (8*x3)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp13
tmp37 = tl.load(in_ptr0 + (10 + (2*x0) + (8*x3)), tmp36 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 0.1111111111111111
tmp40 = tmp38 * tmp39
tl.store(out_ptr0 + (x4), tmp40, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/wu/cwumdhgny2yfc2fqegw3q6scmzh4l35mrpxzpy7u3hofzgr4dlgf.py
# Topologically Sorted Source Nodes: [mul, truediv, setitem, mul_1, truediv_1, setitem_1], Original ATen: [aten.mul, aten.div, aten.copy]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# setitem => copy
# setitem_1 => copy_1
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 9), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6), kwargs = {})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_1, %div), kwargs = {})
# %select_scatter_default : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%avg_pool3d, %copy, 3, -1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, 9), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, 6), kwargs = {})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_6, %div_1), kwargs = {})
# %select_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %copy_1, 2, -1), kwargs = {})
triton_poi_fused_copy_div_mul_1 = async_compile.triton('triton_poi_fused_copy_div_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_div_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_copy_div_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2) % 2
x0 = xindex % 2
x2 = (xindex // 4)
x4 = (xindex // 2)
x5 = xindex
tmp5 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + (2*x4)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (x5), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x0
tmp4 = tmp3 == tmp1
tmp6 = 9.0
tmp7 = tmp5 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp11 * tmp6
tmp13 = tmp12 * tmp8
tmp15 = tmp14 * tmp6
tmp16 = tmp15 * tmp8
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tl.where(tmp2, tmp13, tmp18)
tl.store(out_ptr0 + (x5), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool3d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool3d_constant_pad_nd_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, setitem, mul_1, truediv_1, setitem_1], Original ATen: [aten.mul, aten.div, aten.copy]
triton_poi_fused_copy_div_mul_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import Tensor
from torch import nn
class tfAvgPool3D(nn.Module):
def __init__(self):
super().__init__()
self.avgf = nn.AvgPool3d((1, 3, 3), stride=(1, 2, 2))
def forward(self, x: 'Tensor') ->Tensor:
        if x.shape[-1] != x.shape[-2]:
            raise RuntimeError(
                'only same shape for h and w are supported by avg with tf_like'
                )
f1 = x.shape[-1] % 2 != 0
if f1:
padding_pad = 0, 0, 0, 0
else:
padding_pad = 0, 1, 0, 1
x = torch.nn.functional.pad(x, padding_pad)
if f1:
x = torch.nn.functional.avg_pool3d(x, (1, 3, 3), stride=(1, 2,
2), count_include_pad=False, padding=(0, 1, 1))
else:
x = self.avgf(x)
x[..., -1] = x[..., -1] * 9 / 6
x[..., -1, :] = x[..., -1, :] * 9 / 6
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
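Why the 9/6 rescale: after F.pad(x, (0, 1, 0, 1)) the trailing pooling window along each spatial axis covers six real values plus three zeros, yet AvgPool3d still divides by nine, so scaling that edge slice by 9/6 (and the corner by (9/6)**2, since both assignments hit it) restores a true mean over the valid cells, matching TensorFlow-style 'SAME' average pooling. A minimal sketch on the even-sized branch, where a constant input should pool to the same constant:
import torch

m = tfAvgPool3D()
x = torch.ones(1, 1, 4, 4)  # (C, D, H, W); even H == W takes the pad branch
y = m(x)
print(torch.allclose(y, torch.ones_like(y)))  # True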
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool3d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x3 = xindex // 2
x4 = xindex
tmp0 = 2 * x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 2 * x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (2 * x0 + 8 * x3), tmp5 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = 1 + 2 * x0
tmp8 = tmp7 < tmp1
tmp9 = tmp2 & tmp8
tmp10 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x3), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp10 + tmp6
tmp12 = 2 + 2 * x0
tmp13 = tmp12 < tmp1
tmp14 = tmp2 & tmp13
tmp15 = tl.load(in_ptr0 + (2 + 2 * x0 + 8 * x3), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp15 + tmp11
tmp17 = 1 + 2 * x1
tmp18 = tmp17 < tmp1
tmp19 = tmp18 & tmp4
tmp20 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x3), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tmp20 + tmp16
tmp22 = tmp18 & tmp8
tmp23 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x3), tmp22 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tmp23 + tmp21
tmp25 = tmp18 & tmp13
tmp26 = tl.load(in_ptr0 + (6 + 2 * x0 + 8 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tmp26 + tmp24
tmp28 = 2 + 2 * x1
tmp29 = tmp28 < tmp1
tmp30 = tmp29 & tmp4
tmp31 = tl.load(in_ptr0 + (8 + 2 * x0 + 8 * x3), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 + tmp27
tmp33 = tmp29 & tmp8
tmp34 = tl.load(in_ptr0 + (9 + 2 * x0 + 8 * x3), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp13
tmp37 = tl.load(in_ptr0 + (10 + 2 * x0 + 8 * x3), tmp36 & xmask,
eviction_policy='evict_last', other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 0.1111111111111111
tmp40 = tmp38 * tmp39
tl.store(out_ptr0 + x4, tmp40, xmask)
@triton.jit
def triton_poi_fused_copy_div_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x2 = xindex // 4
x4 = xindex // 2
x5 = xindex
tmp5 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 2 * x4), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + x5, xmask)
    tmp0 = x1
    tmp1 = tl.full([1], 1, tl.int32)
    tmp2 = tmp0 == tmp1  # last output row
    tmp3 = x0
    tmp4 = tmp3 == tmp1  # last output column
    # edge correction: * 9 / 6 written as * 9.0 * 0.16666..., applied to the
    # last column and last row (the corner cell is rescaled twice)
    tmp6 = 9.0
    tmp7 = tmp5 * tmp6
    tmp8 = 0.16666666666666666
    tmp9 = tmp7 * tmp8
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp11 * tmp6
    tmp13 = tmp12 * tmp8
    tmp15 = tmp14 * tmp6
    tmp16 = tmp15 * tmp8
    tmp18 = tl.where(tmp4, tmp16, tmp17)
    tmp19 = tl.where(tmp2, tmp13, tmp18)
    tl.store(out_ptr0 + x5, tmp19, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool3d_constant_pad_nd_0[grid(64)](arg0_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
triton_poi_fused_copy_div_mul_1[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
return buf1,
class tfAvgPool3DNew(nn.Module):
def __init__(self):
super().__init__()
self.avgf = nn.AvgPool3d((1, 3, 3), stride=(1, 2, 2))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
zinzinhust96/MoViNet-pytorch
|
tfAvgPool3D
| false | 11,081 |
[
"MIT"
] | 0 |
f16528a76516427a192524c512c7a7cd8e1ce2f0
|
https://github.com/zinzinhust96/MoViNet-pytorch/tree/f16528a76516427a192524c512c7a7cd8e1ce2f0
|
LayerNorm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/2h/c2h2txhk3ykdisiefb54jqsezi5vinjuukyomno2gwqkbclbpmos.py
# Topologically Sorted Source Nodes: [mu, sub, pow_1, mean_1, add, sig], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# mean_1 => mean_1
# mu => mean
# pow_1 => pow_1
# sig => sqrt
# sub => sub
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1, 2], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1, 2], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
triton_per_fused_add_mean_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = (xindex // 4)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tmp7 = tmp0 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp12 / tmp5
tmp14 = 1e-05
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/rc/crcabfhjr4qcbvwlzwp3w2ejmyb6myr6i2ryha26jny75xmhvgry.py
# Topologically Sorted Source Nodes: [sub, truediv, mul, add_1], Original ATen: [aten.sub, aten.div, aten.mul, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# mul => mul
# sub => sub
# truediv => div
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %unsqueeze_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %unsqueeze_3), kwargs = {})
triton_poi_fused_add_div_mul_sub_1 = async_compile.triton('triton_poi_fused_add_div_mul_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = (xindex // 64)
x2 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (4*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 / tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x4), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 4, 4, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
buf3 = reinterpret_tensor(buf2, (4, 1, 1, 4), (4, 4, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [mu, sub, pow_1, mean_1, add, sig], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_pow_sqrt_sub_0.run(buf1, buf3, primals_1, 16, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, truediv, mul, add_1], Original ATen: [aten.sub, aten.div, aten.mul, aten.add]
triton_poi_fused_add_div_mul_sub_1.run(primals_1, buf1, buf3, primals_2, primals_3, buf4, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf4, primals_1, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class LayerNorm(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.num_channels = num_channels
self.eps = eps
self.weight = nn.Parameter(torch.Tensor(num_channels))
self.bias = nn.Parameter(torch.Tensor(num_channels))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
        mu = torch.mean(x, dim=(1, 2), keepdim=True)
        sig = torch.sqrt(torch.mean((x - mu) ** 2, dim=(1, 2), keepdim=
            True) + self.eps)
        return (x - mu) / sig * self.weight[:, None, None] + self.bias[:,
            None, None]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
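Note the nonstandard axes (a usage sketch of the module as written): unlike nn.LayerNorm, this variant normalizes jointly over dims (1, 2) and ties the affine weight/bias to dim 1 via weight[:, None, None]. With the default ones/zeros initialization the output has zero mean and roughly unit variance over dims (1, 2):
import torch

ln = LayerNorm(num_channels=4)
x = torch.rand(4, 4, 4, 4)
y = ln(x)
print(y.mean(dim=(1, 2)).abs().max().item() < 1e-05)  # True
print((y.var(dim=(1, 2), unbiased=False) - 1).abs().max().item() < 1e-03)  # True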
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tmp7 = tmp0 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp12 / tmp5
tmp14 = 1e-05
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp6, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = xindex // 64
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + (x0 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 / tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x4, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 4, 4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
buf3 = reinterpret_tensor(buf2, (4, 1, 1, 4), (4, 4, 4, 1), 0)
del buf2
get_raw_stream(0)
triton_per_fused_add_mean_pow_sqrt_sub_0[grid(16)](buf1, buf3,
primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_sub_1[grid(256)](primals_1, buf1, buf3,
primals_2, primals_3, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf4, primals_1, buf1, buf3
class LayerNormNew(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.num_channels = num_channels
self.eps = eps
self.weight = nn.Parameter(torch.Tensor(num_channels))
self.bias = nn.Parameter(torch.Tensor(num_channels))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
yoshipon/spl2021_neural-fca
|
LayerNorm
| false | 11,082 |
[
"MIT"
] | 0 |
a316026667dd6bd888547c8348cab8cd3d88e84c
|
https://github.com/yoshipon/spl2021_neural-fca/tree/a316026667dd6bd888547c8348cab8cd3d88e84c
|
SqueezeExcitation
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/tu/ctuej2j6f3oxr5p43q7juhagc3r3ncgs2ikvxemutunlnxlnvl24.py
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# scale => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2, -3], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/wr/cwr63j5areo6rblxaun4tckjxorcnwjjct6cyysiaunwcsuixprf.py
# Topologically Sorted Source Nodes: [scale_1, sigmoid, scale_2], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# scale_1 => convolution
# scale_2 => mul
# sigmoid => sigmoid
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_2, %primals_3, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %sigmoid), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/az/cazousalzuqn73ciahz5izvogzu4ekcsktal4tthjvwjd3cqdayz.py
# Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# scale_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_4, %primals_5, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/qt/cqtpqpkosxih5wqy33doxmejqlbvm3m27buf3idruiqchgn2slqd.py
# Topologically Sorted Source Nodes: [scale_4, mul_1], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# scale_4 => sigmoid_1
# Graph fragment:
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %primals_1), kwargs = {})
triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_3, (8, ), (1, ))
assert_size_stride(primals_4, (4, 8, 1, 1, 1), (8, 1, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 4, 64, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [scale_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_2, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 8, 1, 1, 1), (8, 1, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((8, 1, 1, 1), (1, 8, 8, 8), torch.float32)
# Topologically Sorted Source Nodes: [scale_1, sigmoid, scale_2], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
triton_poi_fused_convolution_mul_sigmoid_1.run(buf3, primals_3, buf4, 8, grid=grid(8), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (1, 8, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf6, primals_5, 4, grid=grid(4), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scale_4, mul_1], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_3.run(buf6, primals_1, buf7, 256, grid=grid(256), stream=stream0)
return (buf7, primals_1, primals_2, primals_4, reinterpret_tensor(buf1, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0), buf3, reinterpret_tensor(buf4, (1, 8, 1, 1, 1), (8, 1, 1, 1, 1), 0), buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 4, 1, 1, 1), (4, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8, 1, 1, 1), (8, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import Tensor
import torch.nn.functional as F
from typing import Optional
from torch import nn
def _make_divisible(v: 'float', divisor: 'int', min_value: 'Optional[int]'=None
    ) -> int:
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
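# Worked example: _make_divisible(1, 8) -> max(8, int(1 + 4) // 8 * 8) = 8,
# so SqueezeExcitation(input_channels=4) below ends up with 8 squeeze channels.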
class swish(nn.Module):
def __init__(self):
super().__init__()
    def forward(self, x: 'Tensor') -> Tensor:
return x * torch.sigmoid(x)
class SqueezeExcitation(nn.Module):
def __init__(self, input_channels: 'int', squeeze_factor: 'int'=4):
super().__init__()
squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8)
self.fc1 = nn.Conv3d(input_channels, squeeze_channels, (1, 1, 1))
self.swish = swish()
self.fc2 = nn.Conv3d(squeeze_channels, input_channels, (1, 1, 1))
    def _scale(self, input: 'Tensor') -> Tensor:
scale = F.adaptive_avg_pool3d(input, 1)
scale = self.fc1(scale)
scale = self.swish(scale)
scale = self.fc2(scale)
return torch.sigmoid(scale)
    def forward(self, input: 'Tensor') -> Tensor:
scale = self._scale(input)
return scale * input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4}]
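# Minimal usage sketch (illustrative, not from the source repo). Note that
# with the 4-D test input, adaptive_avg_pool3d and Conv3d treat dim 0 as
# channels (an unbatched volume); unbatched Conv3d input needs a recent
# PyTorch.
if __name__ == "__main__":
    se = SqueezeExcitation(**get_init_inputs()[1])
    out = se(get_inputs()[0])
    print(out.shape)  # torch.Size([4, 4, 4, 4])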
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import Tensor
import torch.nn.functional as F
from typing import Optional
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
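# Persistent reduction: average the 64 elements of each of the 4 samples
# (the adaptive_avg_pool3d-to-1 "squeeze" step).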
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
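# Fused kernel: add the fc1 bias to the conv output in place, then write
# the swish activation x * sigmoid(x) to out_ptr0.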
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
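# Elementwise kernel: add the fc2 bias to the second conv output in place.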
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
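# Excitation step: apply sigmoid to the gate logits and multiply each
# 64-element slice of the original input by its gate.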
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1, 1), (4, 1, 1, 1, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 8, 1, 1, 1), (8, 1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(4)](buf1, primals_1, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 1,
1, 1), (0, 1, 0, 0, 0), 0), primals_2, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 8, 1, 1, 1), (8, 1, 1, 1, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((8, 1, 1, 1), (1, 8, 8, 8), torch.float32)
triton_poi_fused_convolution_mul_sigmoid_1[grid(8)](buf3, primals_3,
buf4, 8, XBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (1, 8, 1,
1, 1), (0, 1, 0, 0, 0), 0), primals_4, stride=(1, 1, 1),
padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_2[grid(4)](buf6, primals_5, 4, XBLOCK=
4, num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](buf6, primals_1, buf7,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf7, primals_1, primals_2, primals_4, reinterpret_tensor(buf1,
(1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0), buf3, reinterpret_tensor(buf4,
(1, 8, 1, 1, 1), (8, 1, 1, 1, 1), 0), buf6
def _make_divisible(v: 'float', divisor: 'int', min_value: 'Optional[int]'=None
    ) -> int:
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
class swish(nn.Module):
def __init__(self):
super().__init__()
    def forward(self, x: 'Tensor') -> Tensor:
return x * torch.sigmoid(x)
class SqueezeExcitationNew(nn.Module):
def __init__(self, input_channels: 'int', squeeze_factor: 'int'=4):
super().__init__()
squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8)
self.fc1 = nn.Conv3d(input_channels, squeeze_channels, (1, 1, 1))
self.swish = swish()
self.fc2 = nn.Conv3d(squeeze_channels, input_channels, (1, 1, 1))
    def _scale(self, input: 'Tensor') -> Tensor:
scale = F.adaptive_avg_pool3d(input, 1)
scale = self.fc1(scale)
scale = self.swish(scale)
scale = self.fc2(scale)
return torch.sigmoid(scale)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
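# Minimal usage sketch (illustrative): the compiled path is CUDA-only and
# expects the exact (4, 4, 4, 4) float32 layout asserted in call().
if __name__ == "__main__":
    if torch.cuda.is_available():
        se = SqueezeExcitationNew(input_channels=4).cuda()
        out = se(torch.rand(4, 4, 4, 4, device="cuda"))
        print(out.shape)  # torch.Size([4, 4, 4, 4])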
|
zinzinhust96/MoViNet-pytorch
|
SqueezeExcitation
| false | 11,083 |
[
"MIT"
] | 0 |
f16528a76516427a192524c512c7a7cd8e1ce2f0
|
https://github.com/zinzinhust96/MoViNet-pytorch/tree/f16528a76516427a192524c512c7a7cd8e1ce2f0
|
LayerScale
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/n3/cn35ktybwif7rqmutznvx4tpvoabmd7tz7tkzr4iou2t3kvpzivg.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %primals_2), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class LayerScale(nn.Module):
"""Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf).
    This diagonally rescales residual outputs; the scale starts close to 0 and is then learned.
"""
def __init__(self, channels: 'int', init: 'float'=0):
super().__init__()
self.scale = nn.Parameter(torch.zeros(channels, requires_grad=True))
self.scale.data[:] = init
def forward(self, x):
return self.scale[:, None] * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
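# Minimal usage sketch (illustrative, not from the source repo): LayerScale
# is typically applied to a residual branch, e.g.
# x + LayerScale(channels, init=1e-4)(block(x)).
if __name__ == "__main__":
    ls = LayerScale(channels=4, init=1e-4)
    out = ls(torch.rand(4, 4, 4, 4))
    print(out.abs().max())  # small, since the scale starts near zero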
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
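# Elementwise kernel implementing scale[:, None] * x: each element is
# multiplied by the scale entry for its second-to-last dimension index.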
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
return buf0, primals_2
class LayerScaleNew(nn.Module):
"""Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf).
    This diagonally rescales residual outputs; the scale starts close to 0 and is then learned.
"""
def __init__(self, channels: 'int', init: 'float'=0):
super().__init__()
self.scale = nn.Parameter(torch.zeros(channels, requires_grad=True))
self.scale.data[:] = init
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
xvdp/demucs
|
LayerScale
| false | 11,084 |
[
"MIT"
] | 0 |
0a5e3b72c6388801cf0086c2b84d09f6d73c389c
|
https://github.com/xvdp/demucs/tree/0a5e3b72c6388801cf0086c2b84d09f6d73c389c
|
Value
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/5e/c5envgfxbi5empoowogb3vo4fdj6f4fsyqewhvop4uksap44zcmc.py
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 62720
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 784) % 20
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uq/cuqpgnsthkkgujvarn4o2l4aaeotcllsgyuk6zwnpcl7ws6djdn4.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 15680
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = (xindex // 14)
x2 = (xindex // 3920)
x4 = xindex % 3920
tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + (3936*x2)), tmp6, xmask)
tl.store(out_ptr1 + (x4 + (3968*x2)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5e/c5enndaynbqkwwej2orjwjwzfhdam22j47qrwlomedi3ongxsfwd.py
# Topologically Sorted Source Nodes: [conv2d_1, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 100) % 50
x2 = (xindex // 5000)
x4 = xindex % 5000
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + (5024*x2)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/7g/c7gfh6x4k7yrennbculqcdxiyampnnfp6cicr23uuzodqpv75mnx.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_4 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 5000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5) % 250
x2 = (xindex // 1250)
x3 = xindex % 1250
tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1) + (5024*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1) + (5024*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1) + (5024*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1) + (5024*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3 + (1280*x2)), tmp15, xmask)
tl.store(out_ptr1 + (x3 + (1280*x2)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5v/c5v4jargh4hx6d66b56ackpaushbefm47j65o3s4gzgmum4og2yg.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_6 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/4t/c4thk34fatjoz7vahlz3pzzotz6jgrj74jt24zo6zdqxo4hiltt7.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_7 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 32, 32), (1024, 1024, 32, 1))
assert_size_stride(primals_2, (20, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_3, (20, ), (1, ))
assert_size_stride(primals_4, (50, 20, 5, 5), (500, 25, 5, 1))
assert_size_stride(primals_5, (50, ), (1, ))
assert_size_stride(primals_6, (500, 1250), (1250, 1))
assert_size_stride(primals_7, (500, ), (1, ))
assert_size_stride(primals_8, (10, 500), (500, 1))
assert_size_stride(primals_9, (10, ), (1, ))
assert_size_stride(primals_10, (1, 10), (10, 1))
assert_size_stride(primals_11, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 20, 28, 28), (15680, 784, 28, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 62720, grid=grid(62720), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 20, 14, 14), (3936, 196, 14, 1), torch.float32)
buf3 = empty_strided_cuda((4, 20, 14, 14), (3968, 196, 14, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 15680, grid=grid(15680), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 50, 10, 10), (5000, 100, 10, 1))
buf5 = empty_strided_cuda((4, 50, 10, 10), (5024, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf4, primals_5, buf5, 20000, grid=grid(20000), stream=stream0)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 50, 5, 5), (1280, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 50, 5, 5), (1280, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 5000, grid=grid(5000), stream=stream0)
buf8 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (4, 1250), (1280, 1), 0), reinterpret_tensor(primals_6, (1250, 500), (1, 1250), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, primals_7, 2000, grid=grid(2000), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (500, 10), (1, 500), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 40, grid=grid(40), stream=stream0)
del primals_9
buf13 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (10, 1), (1, 10), 0), alpha=1, beta=1, out=buf13)
del primals_11
return (buf13, primals_2, primals_4, primals_1, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 1250), (1280, 1), 0), buf9, buf11, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 32, 32), (1024, 1024, 32, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((20, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((50, 20, 5, 5), (500, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((500, 1250), (1250, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 500), (500, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 10), (10, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
from torch.nn import functional as F
class Value(nn.Module):
def __init__(self, state_size, fcs1_units=400, fc2_units=300):
super(Value, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(5 * 5 * 50, 500)
self.fc2 = nn.Linear(500, 10)
self.fc3 = nn.Linear(10, 1)
def forward(self, state):
x = state.view(-1, 1, 32, 32)
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 5 * 5 * 50)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 1, 32, 32])]
def get_init_inputs():
return [[], {'state_size': 4}]
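# Minimal usage sketch (illustrative, not from the source repo): the network
# reshapes any input to (-1, 1, 32, 32), so the batch size is inferred from
# the flattened state.
if __name__ == "__main__":
    v = Value(state_size=4)
    out = v(get_inputs()[0])
    print(out.shape)  # torch.Size([4, 1])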
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
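# Fused kernel: add the conv1 bias and apply ReLU in place on the
# (4, 20, 28, 28) feature map (x1 indexes the 20 output channels).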
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 62720
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
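# 2x2 max-pool with indices: write the pooled value to out_ptr0 and the
# argmax position within each 2x2 window (0-3, as int8) to out_ptr1.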
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 15680
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 3920
x4 = xindex % 3920
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 3936 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 3968 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 20000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 50
x2 = xindex // 5000
x4 = xindex % 5000
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + 5024 * x2), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 5000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5 % 250
x2 = xindex // 1250
x3 = xindex % 1250
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1 + 5024 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1 + 5024 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1 + 5024 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1 + 5024 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3 + 1280 * x2), tmp15, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 32, 32), (1024, 1024, 32, 1))
assert_size_stride(primals_2, (20, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_3, (20,), (1,))
assert_size_stride(primals_4, (50, 20, 5, 5), (500, 25, 5, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (500, 1250), (1250, 1))
assert_size_stride(primals_7, (500,), (1,))
assert_size_stride(primals_8, (10, 500), (500, 1))
assert_size_stride(primals_9, (10,), (1,))
assert_size_stride(primals_10, (1, 10), (10, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2,
            stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 20, 28, 28), (15680, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(62720)](buf1, primals_3,
62720, XBLOCK=512, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 20, 14, 14), (3936, 196, 14, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 20, 14, 14), (3968, 196, 14, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(15680)](buf1, buf2,
buf3, 15680, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 50, 10, 10), (5000, 100, 10, 1))
buf5 = empty_strided_cuda((4, 50, 10, 10), (5024, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(20000)](buf4, primals_5,
buf5, 20000, XBLOCK=128, num_warps=4, num_stages=1)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 50, 5, 5), (1280, 25, 5, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 50, 5, 5), (1280, 25, 5, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(5000)](buf5, buf6,
buf7, 5000, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
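        # reinterpret_tensor views fc1's (500, 1250) weight as its (1250,
        # 500) transpose by swapping strides, so the mm below computes
        # x @ W.T without materializing a transposed copy.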
extern_kernels.mm(reinterpret_tensor(buf7, (4, 1250), (1280, 1), 0),
reinterpret_tensor(primals_6, (1250, 500), (1, 1250), 0), out=buf8)
buf9 = buf8
del buf8
        triton_poi_fused_relu_4[grid(2000)](buf9, primals_7, 2000,
            XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (500, 10), (1,
500), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(40)](buf11, primals_9, 40, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_9
buf13 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (10, 1), (1, 10), 0), alpha=1, beta=1, out=buf13)
del primals_11
return (buf13, primals_2, primals_4, primals_1, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 1250), (1280, 1), 0), buf9,
buf11, primals_10, primals_8, primals_6)
class ValueNew(nn.Module):
def __init__(self, state_size, fcs1_units=400, fc2_units=300):
super(ValueNew, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(5 * 5 * 50, 500)
self.fc2 = nn.Linear(500, 10)
self.fc3 = nn.Linear(10, 1)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
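# Editor's smoke-test sketch (an addition, not part of the generated file):
# it assumes a CUDA device, since call() pins everything to cuda:0. The
# compiled wrapper should map a batch of 1x32x32 states to one scalar value
# per sample.
if __name__ == '__main__':
    model = ValueNew(state_size=4).cuda()
    state = torch.rand(4, 1, 32, 32, device='cuda')
    value = model(state)
    assert value.shape == (4, 1)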
|
zwc662/disentangling-vae
|
Value
| false | 11,085 |
[
"MIT"
] | 0 |
7eeace2a30f8034e222be6a906f53748b3b2bb6e
|
https://github.com/zwc662/disentangling-vae/tree/7eeace2a30f8034e222be6a906f53748b3b2bb6e
|
Net
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/5e/c5envgfxbi5empoowogb3vo4fdj6f4fsyqewhvop4uksap44zcmc.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 62720
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 784) % 20
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/uq/cuqpgnsthkkgujvarn4o2l4aaeotcllsgyuk6zwnpcl7ws6djdn4.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 15680
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = (xindex // 14)
x2 = (xindex // 3920)
x4 = xindex % 3920
tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + (3936*x2)), tmp6, xmask)
tl.store(out_ptr1 + (x4 + (3968*x2)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5e/c5enndaynbqkwwej2orjwjwzfhdam22j47qrwlomedi3ongxsfwd.py
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 100) % 50
x2 = (xindex // 5000)
x4 = xindex % 5000
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + (5024*x2)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/7g/c7gfh6x4k7yrennbculqcdxiyampnnfp6cicr23uuzodqpv75mnx.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 5000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5) % 250
x2 = (xindex // 1250)
x3 = xindex % 1250
tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1) + (5024*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1) + (5024*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1) + (5024*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1) + (5024*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3 + (1280*x2)), tmp15, xmask)
tl.store(out_ptr1 + (x3 + (1280*x2)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5v/c5v4jargh4hx6d66b56ackpaushbefm47j65o3s4gzgmum4og2yg.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/i3/ci3gjxynws4b6snihkmy4mi7mtg3xhsmd2rvhtmsnpenwlazlbbl.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_5 = async_compile.triton('triton_per_fused__log_softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_5(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
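# The persistent reduction above implements the numerically stable identity
#   log_softmax(x)_i = (x_i - max_j x_j) - log(sum_j exp(x_j - max_j x_j)),
# with one program per row (xnumel = 4 rows, rnumel = 10 classes).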
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (20, ), (1, ))
assert_size_stride(primals_3, (4, 1, 32, 32), (1024, 1024, 32, 1))
assert_size_stride(primals_4, (50, 20, 5, 5), (500, 25, 5, 1))
assert_size_stride(primals_5, (50, ), (1, ))
assert_size_stride(primals_6, (500, 1250), (1250, 1))
assert_size_stride(primals_7, (500, ), (1, ))
assert_size_stride(primals_8, (10, 500), (500, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 20, 28, 28), (15680, 784, 28, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 62720, grid=grid(62720), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 20, 14, 14), (3936, 196, 14, 1), torch.float32)
buf3 = empty_strided_cuda((4, 20, 14, 14), (3968, 196, 14, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 15680, grid=grid(15680), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 50, 10, 10), (5000, 100, 10, 1))
buf5 = empty_strided_cuda((4, 50, 10, 10), (5024, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf4, primals_5, buf5, 20000, grid=grid(20000), stream=stream0)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 50, 5, 5), (1280, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 50, 5, 5), (1280, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 5000, grid=grid(5000), stream=stream0)
buf8 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (4, 1250), (1280, 1), 0), reinterpret_tensor(primals_6, (1250, 500), (1, 1250), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, primals_7, 2000, grid=grid(2000), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (500, 10), (1, 500), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_5.run(buf10, buf13, 4, 10, grid=grid(4), stream=stream0)
del buf10
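    # Besides buf13 (the log-probabilities), call() returns the weights,
    # intermediate activations and pooling indices that the generated
    # backward pass will consume, mirroring what autograd saves in eager
    # mode (editor's note).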
return (buf13, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 1250), (1280, 1), 0), buf9, buf13, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((20, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 32, 32), (1024, 1024, 32, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((50, 20, 5, 5), (500, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((500, 1250), (1250, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 500), (500, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
from torch.nn import functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(5 * 5 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 5 * 5 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 1, 32, 32])]
def get_init_inputs():
return [[], {}]
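# Editor's usage sketch (an addition; runs on CPU):
if __name__ == '__main__':
    net = Net()
    log_probs = net(torch.rand(4, 1, 32, 32))
    assert log_probs.shape == (4, 10)
    print(log_probs.argmax(dim=1))  # predicted class per sample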
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 62720
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 15680
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 3920
x4 = xindex % 3920
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 3936 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 3968 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 20000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 50
x2 = xindex // 5000
x4 = xindex % 5000
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + 5024 * x2), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 5000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5 % 250
x2 = xindex // 1250
x3 = xindex % 1250
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1 + 5024 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1 + 5024 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1 + 5024 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1 + 5024 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3 + 1280 * x2), tmp15, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_5(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
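# Persistent-reduction detail: RBLOCK = 16 >= rnumel = 10, so each row is
# reduced in a single pass; rmask disables the six out-of-range lanes,
# padding them with -inf for the max and 0 for the sum.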
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 1, 32, 32), (1024, 1024, 32, 1))
assert_size_stride(primals_4, (50, 20, 5, 5), (500, 25, 5, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (500, 1250), (1250, 1))
assert_size_stride(primals_7, (500,), (1,))
assert_size_stride(primals_8, (10, 500), (500, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1,
            stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 20, 28, 28), (15680, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(62720)](buf1, primals_2,
62720, XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 20, 14, 14), (3936, 196, 14, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 20, 14, 14), (3968, 196, 14, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(15680)](buf1, buf2,
buf3, 15680, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 50, 10, 10), (5000, 100, 10, 1))
buf5 = empty_strided_cuda((4, 50, 10, 10), (5024, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(20000)](buf4, primals_5,
buf5, 20000, XBLOCK=128, num_warps=4, num_stages=1)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 50, 5, 5), (1280, 25, 5, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 50, 5, 5), (1280, 25, 5, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(5000)](buf5, buf6,
buf7, 5000, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 1250), (1280, 1), 0),
reinterpret_tensor(primals_6, (1250, 500), (1, 1250), 0), out=buf8)
buf9 = buf8
del buf8
        triton_poi_fused_relu_4[grid(2000)](buf9, primals_7, 2000,
            XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(500, 10), (1, 500), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_5[grid(4)](buf10, buf13, 4, 10,
XBLOCK=1, num_warps=2, num_stages=1)
del buf10
return (buf13, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 1250), (1280, 1), 0), buf9,
buf13, primals_8, primals_6)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(5 * 5 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
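# Editor's smoke-test sketch (an addition, not part of the generated file;
# assumes a CUDA device). NetNew should agree with the eager Net above: it
# returns per-class log-probabilities, so exponentiated rows sum to ~1.
if __name__ == '__main__':
    model = NetNew().cuda()
    x = torch.rand(4, 1, 32, 32, device='cuda')
    log_probs = model(x)
    assert log_probs.shape == (4, 10)
    assert torch.allclose(log_probs.exp().sum(dim=1),
                          torch.ones(4, device='cuda'), atol=1e-5)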
|
zwc662/disentangling-vae
|
Net
| false | 11,086 |
[
"MIT"
] | 0 |
7eeace2a30f8034e222be6a906f53748b3b2bb6e
|
https://github.com/zwc662/disentangling-vae/tree/7eeace2a30f8034e222be6a906f53748b3b2bb6e
|
Auto_Encoder_Model
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# output => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
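# Note: here xnumel (4*64*64*64 = 1048576) is a power of two, so inductor
# statically proves every block is full, replaces the bounds check with a
# constant-true mask, and emits unmasked loads/stores.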
# kernel path: runs/run_shard_8/inductor_cache/7z/c7zuih2ysjtir5rh5seep5ijnhokjlgkyjw2edhf257ahvz4iipr.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# output_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/j6/cj6faeofhfnxsh5iuwazughjlau4igyajnmvjequyelq7apzs4qm.py
# Topologically Sorted Source Nodes: [conv2d_1, output_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# output_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/6y/c6yx6oq7oo2cwoaop3iwu5iqfdckg6lycdtu4jjuiv3wdcf2o6p7.py
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# output_3 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/c3/cc37wvituo2asffgdbn2cnuhsr4nuj5pzt75pvxxxx4t7tdtdkqj.py
# Topologically Sorted Source Nodes: [conv2d_2, output_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# output_4 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/t7/ct7cf34m4g63ojfteengkc3tcdxkjvs4wde47kna4a7bol6sdtyb.py
# Topologically Sorted Source Nodes: [conv2d_4, output_8], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv2d_4 => convolution_6
# output_8 => sigmoid
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_sigmoid_5 = async_compile.triton('triton_poi_fused_convolution_sigmoid_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, None)
''', device_str='cuda')
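# For reference, a minimal eager-mode sketch of what the fused kernel above
# computes (an illustration added for clarity, not part of the generated
# code): the final conv output gets its bias added and a sigmoid applied.
def _reference_bias_sigmoid(conv_out, bias):
    # bias has a single element here (primals_15 is shape (1,)) and is
    # broadcast over the whole (N, 1, H, W) activation, matching
    # tmp3 = tmp0 + tmp2; tmp4 = tl.sigmoid(tmp3) in the kernel.
    return torch.sigmoid(conv_out + bias.view(1, -1, 1, 1))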
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (1, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, output_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 131072, grid=grid(131072), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, output_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 16384, grid=grid(16384), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d, output_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf11, primals_9, 131072, grid=grid(131072), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, output_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf13, primals_11, 131072, grid=grid(131072), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_1, output_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf15, primals_13, 1048576, grid=grid(1048576), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, output_8], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_5.run(buf17, primals_15, 16384, grid=grid(16384), stream=stream0)
del primals_15
return (buf17, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf11, buf13, buf15, buf17, )
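# Note on the return value: buf17 is the model output; the remaining tensors
# (weights plus intermediate activations buf1..buf15 and the int8 pooling
# indices buf3/buf7) are kept alive for the backward pass, mirroring what
# autograd would save in eager mode. The `buf1 = buf0; del buf0` pattern
# above is Inductor's in-place buffer reuse for the fused epilogue kernels.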
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Auto_Encoder_Model(nn.Module):
def __init__(self):
super(Auto_Encoder_Model, self).__init__()
self.conv1 = nn.Conv2d(1, 64, padding=1, kernel_size=3)
self.max_pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(64, 32, padding=1, kernel_size=3)
self.max_pool2 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(32, 16, padding=1, kernel_size=3)
        self.tran_conv1 = nn.ConvTranspose2d(16, 32, kernel_size=3,
            stride=2, padding=1, output_padding=1)
self.conv4 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.tran_conv2 = nn.ConvTranspose2d(32, 64, kernel_size=3,
            stride=2, padding=1, output_padding=1)
self.conv5 = nn.Conv2d(64, 1, kernel_size=3, padding=1)
def forward_pass(self, x):
output = F.relu(self.conv1(x))
output = self.max_pool1(output)
output = F.relu(self.conv2(output))
output = self.max_pool2(output)
output = F.relu(self.conv3(output))
return output
def reconstruct_pass(self, x):
output = F.relu(self.tran_conv1(x))
output = F.relu(self.conv4(output))
output = F.relu(self.tran_conv2(output))
output = torch.sigmoid(self.conv5(output))
return output
def forward(self, x):
output = self.forward_pass(x)
output = self.reconstruct_pass(output)
return output
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
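# A minimal smoke test for the eager module above (an illustrative sketch,
# not part of the original source); shapes follow get_inputs().
def _smoke_test():
    model = Auto_Encoder_Model()
    x, = get_inputs()
    y = model(x)
    # Two 2x max-pools then two stride-2 transposed convolutions restore the
    # input resolution, so the reconstruction matches the input shape.
    assert y.shape == x.shape
    return y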
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (1, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf1, buf2,
buf3, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_5,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6,
buf7, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_7,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_2[grid(131072)](buf11, primals_9,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf12 = extern_kernels.convolution(buf11, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_2[grid(131072)](buf13, primals_11,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_0[grid(1048576)](buf15,
primals_13, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_sigmoid_5[grid(16384)](buf17,
primals_15, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
return (buf17, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf2, buf3, buf5, buf6,
buf7, buf9, buf11, buf13, buf15, buf17)
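# Element 0 of the tuple returned by call() (buf17) is the reconstruction;
# the trailing tensors are the weights and activations retained for the
# backward pass, as in the annotated version of call() earlier in this entry.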
class Auto_Encoder_ModelNew(nn.Module):
def __init__(self):
super(Auto_Encoder_ModelNew, self).__init__()
self.conv1 = nn.Conv2d(1, 64, padding=1, kernel_size=3)
self.max_pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(64, 32, padding=1, kernel_size=3)
self.max_pool2 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(32, 16, padding=1, kernel_size=3)
        self.tran_conv1 = nn.ConvTranspose2d(16, 32, kernel_size=3,
            stride=2, padding=1, output_padding=1)
self.conv4 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.tran_conv2 = nn.ConvTranspose2d(32, 64, kernel_size=3,
            stride=2, padding=1, output_padding=1)
self.conv5 = nn.Conv2d(64, 1, kernel_size=3, padding=1)
def forward_pass(self, x):
output = F.relu(self.conv1(x))
output = self.max_pool1(output)
output = F.relu(self.conv2(output))
output = self.max_pool2(output)
output = F.relu(self.conv3(output))
return output
def reconstruct_pass(self, x):
output = F.relu(self.tran_conv1(x))
output = F.relu(self.conv4(output))
output = F.relu(self.tran_conv2(output))
output = torch.sigmoid(self.conv5(output))
return output
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.tran_conv1.weight
primals_9 = self.tran_conv1.bias
primals_10 = self.conv4.weight
primals_11 = self.conv4.bias
primals_12 = self.tran_conv2.weight
primals_13 = self.tran_conv2.bias
primals_14 = self.conv5.weight
primals_15 = self.conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
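# forward() above delegates to the compiled call(): it packs the eager
# parameters and the input into the primals_* slots and returns output[0],
# i.e. buf17, the reconstruction.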
|
yutian-zhao/MICCAI19-MedVQA
|
Auto_Encoder_Model
| false | 11,087 |
[
"MIT"
] | 0 |
7df92c529ed87d67281efb2f568fc6c57cebfef1
|
https://github.com/yutian-zhao/MICCAI19-MedVQA/tree/7df92c529ed87d67281efb2f568fc6c57cebfef1
|
Net
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/tt/cttq7otcuyqcifesznftwochvemvmdbxs3jqbyblqi7ltbv2ffif.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 288
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 9
y1 = (yindex // 9)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (9*x2) + (225*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/lb/clb6vi2yqglui3rj2azxjqee3npwkoqhpqa5q3mntq7arpqrurq3.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 16384], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 36
xnumel = 16384
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 9
y1 = (yindex // 9)
tmp0 = tl.load(in_ptr0 + (x2 + (16384*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (9*x2) + (147456*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xs/cxsyq4kjzmygvpoxfzfw4hzamfkj5tnlg4txbrqbf7aatlejhfwx.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (800*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/qw/cqwg3xdwx3eugpxibvknyiosqdjdt7h7peu75twynersbsv447ra.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/kh/ckhw3mz5iwg4zqohyvfxdn6j6w3kh4rpy6wz5evnyk4clgyi3ztv.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask)
''', device_str='cuda')
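# The five kernels above (triton_poi_fused_0 .. triton_poi_fused_4) carry no
# arithmetic: judging from the index math (e.g. loading at x2 + 25*y3 and
# storing at y0 + 9*x2 + 225*y1), they appear to repack the 5x5 convolution
# weights -- and, in triton_poi_fused_1, the input activation -- from
# contiguous OIHW/NCHW strides into a channels-last layout for the
# convolutions below. This is a reading of the strides, not a label from the
# generated source, which only marks them "Unsorted Source Nodes".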
# kernel path: runs/run_shard_8/inductor_cache/yo/cyovauwn7twwbuazglx2aerwegdionzbzxqd6doryx2scjsys4zi.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1968128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/pb/cpbzvj23sjzrhh7mlkmjt5w7tkhuj4rrmiuck6ks2kulbnmk7k2r.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 492032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = (xindex // 32) % 62
x2 = (xindex // 1984)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (7936*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (7936*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (3968 + x0 + (64*x1) + (7936*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (4000 + x0 + (64*x1) + (7936*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
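# This pooling kernel emits two outputs per 2x2 window: the max value
# (out_ptr0) and a window-local argmax offset in {0, 1, 2, 3} stored as int8
# (out_ptr1). That offset form is what the low-memory
# _low_memory_max_pool2d_with_offsets variant keeps for the backward pass,
# in place of the flat int64 indices eager max_pool2d would return.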
# kernel path: runs/run_shard_8/inductor_cache/5m/c5mt64wfpzxnvpwyaac74lprefmkl2rlhdpcj6ondy5w7nulrsrn.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 861184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/vo/cvode3impirj6ekt6xnrbq27heai7gyypclozxgwuufmhep5akzp.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 215296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 29
x2 = (xindex // 1856)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (7424*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (7424*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (3712 + x0 + (128*x1) + (7424*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (3776 + x0 + (128*x1) + (7424*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/jb/cjbc3qwb4l7cjiqqvai3oy4x2m7vdvbc2ecy5zlnxew5leczhvn3.py
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ft/cftu2srrpjc2sxl375uy2cgrjxhrnajyt6cwo5lssht4ysdisuuf.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 73728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128) % 12
x2 = (xindex // 1536) % 12
x3 = (xindex // 18432)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (6400*x2) + (80000*x3)), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (6400*x2) + (80000*x3)), None)
tmp3 = tl.load(in_ptr0 + (3200 + x0 + (256*x1) + (6400*x2) + (80000*x3)), None)
tmp5 = tl.load(in_ptr0 + (3328 + x0 + (256*x1) + (6400*x2) + (80000*x3)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4), tmp6, None)
tl.store(out_ptr1 + (x4), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/lx/clxvms4hq26tsvprssavxtjdwd56o5l3alm52doxgzcn7cyo335z.py
# Topologically Sorted Source Nodes: [conv2d_3, relu_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# relu_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/4c/c4cgfrrnnjaqqu2mw37mbl7gs2lmyooybwlmphux5q3nafqldr5r.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => _low_memory_max_pool2d_with_offsets_3, getitem_7
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y5 = yindex
y4 = (yindex // 16)
y6 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + (512*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x2 + (512*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2048 + x2 + (512*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2304 + x2 + (512*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + (256*y5)), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + (16*x2) + (4096*y4)), tmp16, xmask & ymask)
''', device_str='cuda')
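# The pooling kernel above emits two tensors per 2x2 window: the max value
# and an int8 code in {0, 1, 2, 3} giving the row-major position of the
# winner inside the window (0 = top-left, 1 = top-right, 2 = bottom-left,
# 3 = bottom-right), later consumed by the backward pass. A sketch of the
# same encoding in eager mode (our illustrative helper, assuming even H, W):
def _maxpool2x2_local_indices(x):
    n, c, h, w = x.shape
    windows = x.view(n, c, h // 2, 2, w // 2, 2).permute(0, 1, 2, 4, 3, 5)
    flat = windows.reshape(n, c, h // 2, w // 2, 4)
    vals, idx = flat.max(dim=-1)  # idx is the row-major window position
    return vals, idx.to(torch.int8)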
# kernel path: runs/run_shard_8/inductor_cache/dz/cdzfmjs25xuybljq7i6s7sbsl32ovxiapkuzeylwntb6ra5kv32n.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_4
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_13 = async_compile.triton('triton_poi_fused_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/om/comq4j2hqqnxlgxgi3ahxk3oqgj5eaid3nnx54t3vnpdsp67sjkn.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_14 = async_compile.triton('triton_poi_fused__softmax_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_14(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
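# The kernel above is the numerically stable softmax specialised for the
# 2-class output: it subtracts the row max before exponentiating so exp()
# cannot overflow, then normalises by the row sum -- exactly the
# amax/sub/exp/sum/div decomposition listed in the graph fragment. Eager
# equivalent (our illustrative helper):
def _stable_softmax_reference(logits):
    shifted = logits - logits.max(dim=1, keepdim=True).values
    e = shifted.exp()
    return e / e.sum(dim=1, keepdim=True)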
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (32, 9, 5, 5), (225, 25, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 9, 128, 128), (147456, 16384, 128, 1))
assert_size_stride(primals_4, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (256, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (512, 1024), (1024, 1))
assert_size_stride(primals_11, (512, ), (1, ))
assert_size_stride(primals_12, (2, 512), (512, 1))
assert_size_stride(primals_13, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 9, 5, 5), (225, 1, 45, 9), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 288, 25, grid=grid(288, 25), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 9, 128, 128), (147456, 1, 1152, 9), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 36, 16384, grid=grid(36, 16384), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 2048, 25, grid=grid(2048, 25), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 25, grid=grid(8192, 25), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((256, 128, 5, 5), (3200, 1, 640, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 32768, 25, grid=grid(32768, 25), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 124, 124), (492032, 1, 3968, 32))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf6, primals_2, 1968128, grid=grid(1968128), stream=stream0)
del primals_2
buf7 = empty_strided_cuda((4, 32, 62, 62), (123008, 1, 1984, 32), torch.float32)
buf8 = empty_strided_cuda((4, 32, 62, 62), (123008, 1, 1984, 32), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_6.run(buf6, buf7, buf8, 492032, grid=grid(492032), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 58, 58), (215296, 1, 3712, 64))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf10, primals_5, 861184, grid=grid(861184), stream=stream0)
del primals_5
buf11 = empty_strided_cuda((4, 64, 29, 29), (53824, 1, 1856, 64), torch.float32)
buf12 = empty_strided_cuda((4, 64, 29, 29), (53824, 1, 1856, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf10, buf11, buf12, 215296, grid=grid(215296), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 25, 25), (80000, 1, 3200, 128))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf14, primals_7, 320000, grid=grid(320000), stream=stream0)
del primals_7
buf15 = empty_strided_cuda((4, 128, 12, 12), (18432, 1, 1536, 128), torch.float32)
buf16 = empty_strided_cuda((4, 128, 12, 12), (18432, 1, 1536, 128), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_10.run(buf14, buf15, buf16, 73728, grid=grid(73728), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, relu_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf18, primals_9, 65536, grid=grid(65536), stream=stream0)
del primals_9
buf19 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.int8)
buf20 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf18, buf19, buf20, 64, 256, grid=grid(64, 256), stream=stream0)
buf21 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf20, (16, 1024), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 512), (1, 1024), 0), out=buf21)
buf22 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_13.run(buf22, primals_11, 8192, grid=grid(8192), stream=stream0)
del primals_11
buf23 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, buf22, reinterpret_tensor(primals_12, (512, 2), (1, 512), 0), alpha=1, beta=1, out=buf23)
del primals_13
buf24 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_14.run(buf23, buf24, 32, grid=grid(32), stream=stream0)
del buf23
return (buf24, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10, buf11, buf12, buf14, buf15, buf16, buf18, buf19, reinterpret_tensor(buf20, (16, 1024), (1024, 1), 0), buf22, buf24, primals_12, primals_10, )
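# Note: only the first element of the tuple returned by call() is the
# user-visible result (the softmax probabilities); the remaining buffers
# (repacked weights, activations, pooling indices) are, as far as we can
# tell, the tensors saved so the AOT backward graph can be replayed.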
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 9, 5, 5), (225, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 9, 128, 128), (147456, 16384, 128, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((512, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((2, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self, c1=32, c2=64, c3=128, c4=256, l1=512, d1=0.0):
super().__init__()
self.conv1 = nn.Conv2d(9, c1, (5, 5))
self.conv2 = nn.Conv2d(c1, c2, (5, 5))
self.conv3 = nn.Conv2d(c2, c3, (5, 5))
self.conv4 = nn.Conv2d(c3, c4, (5, 5))
x = torch.randn(900, 900).view(-1, 9, 100, 100)
self.toLinear = -1
self.convs(x)
self.fc1 = nn.Linear(self.toLinear, l1)
self.fc2 = nn.Linear(l1, 2)
self.dropout1 = nn.Dropout(d1)
def convs(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv4(x)), (2, 2))
if self.toLinear == -1:
self.toLinear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, x):
x = self.convs(x)
x = x.view(-1, self.toLinear)
x = F.relu(self.dropout1(self.fc1(x)))
x = self.fc2(x)
return F.softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 9, 128, 128])]
def get_init_inputs():
return [[], {}]
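# Usage sketch (our illustration, not part of the source repo): toLinear is
# probed with a 100x100 dummy input, giving 256*2*2 = 1024 features, while
# get_inputs() supplies 128x128 images, whose final feature map is
# (256, 4, 4) = 4096 values per sample. view(-1, self.toLinear) therefore
# reshapes a batch of 4 into 16 rows, which is why the compiled call()
# above works with (16, 1024) and (16, 2) buffers.
def _demo():
    net = Net()
    out = net(get_inputs()[0])
    assert out.shape == (16, 2)  # 4 images * 4 row-chunks, 2 classes
    return out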
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 288
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 9
y1 = yindex // 9
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 9 * x2 + 225 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 9
y1 = yindex // 9
tmp0 = tl.load(in_ptr0 + (x2 + 16384 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 9 * x2 + 147456 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 492032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32 % 62
x2 = xindex // 1984
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 7936 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 7936 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (3968 + x0 + 64 * x1 + 7936 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (4000 + x0 + 64 * x1 + 7936 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 861184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 215296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 29
x2 = xindex // 1856
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 7424 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 7424 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (3712 + x0 + 128 * x1 + 7424 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (3776 + x0 + 128 * x1 + 7424 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 320000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 12
x2 = xindex // 1536 % 12
x3 = xindex // 18432
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 6400 * x2 + 80000 * x3), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 6400 * x2 + 80000 * x3),
None)
tmp3 = tl.load(in_ptr0 + (3200 + x0 + 256 * x1 + 6400 * x2 + 80000 * x3
), None)
tmp5 = tl.load(in_ptr0 + (3328 + x0 + 256 * x1 + 6400 * x2 + 80000 * x3
), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x4, tmp6, None)
tl.store(out_ptr1 + x4, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y5 = yindex
y4 = yindex // 16
y6 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + 512 * y0 + 4096 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x2 + 512 * y0 + 4096 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2048 + x2 + 512 * y0 + 4096 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2304 + x2 + 512 * y0 + 4096 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + 256 * y5), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + 16 * x2 + 4096 * y4), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused__softmax_14(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (32, 9, 5, 5), (225, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 9, 128, 128), (147456, 16384, 128, 1))
assert_size_stride(primals_4, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (512, 1024), (1024, 1))
assert_size_stride(primals_11, (512,), (1,))
assert_size_stride(primals_12, (2, 512), (512, 1))
assert_size_stride(primals_13, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 9, 5, 5), (225, 1, 45, 9), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(288, 25)](primals_1, buf0, 288, 25, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 9, 128, 128), (147456, 1, 1152, 9),
torch.float32)
triton_poi_fused_1[grid(36, 16384)](primals_3, buf1, 36, 16384,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 25)](primals_4, buf2, 2048, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_3[grid(8192, 25)](primals_6, buf3, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
triton_poi_fused_4[grid(32768, 25)](primals_8, buf4, 32768, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_8
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 124, 124), (492032, 1, 3968, 32))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_5[grid(1968128)](buf6, primals_2,
1968128, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf7 = empty_strided_cuda((4, 32, 62, 62), (123008, 1, 1984, 32),
torch.float32)
buf8 = empty_strided_cuda((4, 32, 62, 62), (123008, 1, 1984, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_6[grid(492032)](buf6, buf7,
buf8, 492032, XBLOCK=1024, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 58, 58), (215296, 1, 3712, 64))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_7[grid(861184)](buf10, primals_5,
861184, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf11 = empty_strided_cuda((4, 64, 29, 29), (53824, 1, 1856, 64),
torch.float32)
buf12 = empty_strided_cuda((4, 64, 29, 29), (53824, 1, 1856, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(215296)](buf10,
buf11, buf12, 215296, XBLOCK=512, num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 25, 25), (80000, 1, 3200, 128))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_9[grid(320000)](buf14, primals_7,
320000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf15 = empty_strided_cuda((4, 128, 12, 12), (18432, 1, 1536, 128),
torch.float32)
buf16 = empty_strided_cuda((4, 128, 12, 12), (18432, 1, 1536, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(73728)](buf14,
buf15, buf16, 73728, XBLOCK=1024, num_warps=4, num_stages=1)
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_11[grid(65536)](buf18, primals_9,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256),
torch.int8)
buf20 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_12[grid(64, 256)](buf18,
buf19, buf20, 64, 256, XBLOCK=4, YBLOCK=64, num_warps=4,
num_stages=1)
buf21 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf20, (16, 1024), (1024, 1),
0), reinterpret_tensor(primals_10, (1024, 512), (1, 1024), 0),
out=buf21)
buf22 = buf21
del buf21
triton_poi_fused_relu_13[grid(8192)](buf22, primals_11, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf23 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_13, buf22, reinterpret_tensor(
primals_12, (512, 2), (1, 512), 0), alpha=1, beta=1, out=buf23)
del primals_13
buf24 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
triton_poi_fused__softmax_14[grid(32)](buf23, buf24, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del buf23
return (buf24, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10,
buf11, buf12, buf14, buf15, buf16, buf18, buf19, reinterpret_tensor
(buf20, (16, 1024), (1024, 1), 0), buf22, buf24, primals_12, primals_10
)
class NetNew(nn.Module):
def __init__(self, c1=32, c2=64, c3=128, c4=256, l1=512, d1=0.0):
super().__init__()
self.conv1 = nn.Conv2d(9, c1, (5, 5))
self.conv2 = nn.Conv2d(c1, c2, (5, 5))
self.conv3 = nn.Conv2d(c2, c3, (5, 5))
self.conv4 = nn.Conv2d(c3, c4, (5, 5))
x = torch.randn(900, 900).view(-1, 9, 100, 100)
self.toLinear = -1
self.convs(x)
self.fc1 = nn.Linear(self.toLinear, l1)
self.fc2 = nn.Linear(l1, 2)
self.dropout1 = nn.Dropout(d1)
def convs(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv4(x)), (2, 2))
if self.toLinear == -1:
self.toLinear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.fc1.weight
primals_11 = self.fc1.bias
primals_12 = self.fc2.weight
primals_13 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
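# NetNew reuses the original module's parameters but routes forward()
# through the compiled call() above instead of the eager convs/fc path.
# Sketch of the intended usage (our illustration; requires a CUDA device
# and is only defined here, never executed):
def _demo_compiled():
    net = NetNew().cuda()
    x = torch.rand(4, 9, 128, 128, device='cuda')
    return net(x)  # (16, 2) row-wise softmax probabilities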
|
vtomi97/LHYP
|
Net
| false | 11088 |
[
"MIT"
] | 0 |
3db91f889c0f6b866b9537975f664f072e021ea9
|
https://github.com/vtomi97/LHYP/tree/3db91f889c0f6b866b9537975f664f072e021ea9
|
LocalState
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/h4/ch4jp3nbphlqyivmlm7rppdekye6u3d2xp547gvt4mxbtmh3b7n7.py
# Topologically Sorted Source Nodes: [conv1d_2, sigmoid, decay_q_1], Original ATen: [aten.convolution, aten.sigmoid, aten.div]
# Source node to ATen node mapping:
# conv1d_2 => convolution_2
# decay_q_1 => div_1
# sigmoid => sigmoid
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_6, %primals_7, [1], [0], [1], False, [0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_6,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sigmoid, 2), kwargs = {})
triton_poi_fused_convolution_div_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_div_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_div_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_div_sigmoid_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
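# The epilogue above fuses the conv1d bias, the sigmoid and the divide by 2
# into one pass, rewriting sigmoid(x) / 2 as sigmoid(x) * 0.5 (a multiply
# instead of a divide), and also writes the biased pre-activation back in
# place for reuse. Eager-mode sketch of the gated output (our illustrative
# helper):
def _decay_gate_reference(conv_out, bias):
    # conv_out: (N, 16, L) bias-free conv1d output; bias: (16,)
    return torch.sigmoid(conv_out + bias.view(1, -1, 1)) * 0.5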
# kernel path: runs/run_shard_8/inductor_cache/tk/ctk4b5n4bdpdst6p72oshxchioeicxcaytfxy7l25p7gds47xhqd.py
# Topologically Sorted Source Nodes: [delta, neg, abs_1, mul, decay_kernel], Original ATen: [aten.sub, aten.neg, aten.abs, aten.mul, aten.div]
# Source node to ATen node mapping:
# abs_1 => abs_1
# decay_kernel => div_2
# delta => sub
# mul => mul_3
# neg => neg
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view_7,), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %abs_1), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, 2.0), kwargs = {})
triton_poi_fused_abs_div_mul_neg_sub_2 = async_compile.triton('triton_poi_fused_abs_div_mul_neg_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_div_mul_neg_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_div_mul_neg_sub_2(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = ((-1)*(tl_math.abs(x0 + ((-1)*x1)))) + ((-1)*x2*(tl_math.abs(x0 + ((-1)*x1))))
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
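# The kernel above takes no inputs at all -- the decay matrix has been
# constant-folded, so every value is computed straight from the flat index:
#     decay[h, i, j] = -(h + 1) * |i - j| / 2
# for head h and positions i, j. Eager-mode sketch (our illustrative
# helper, with heads=4 and length=4 as in this trace):
def _decay_kernel_reference(heads=4, length=4):
    pos = torch.arange(length)
    delta = (pos[:, None] - pos[None, :]).abs().float()  # |i - j|
    rates = torch.arange(1, heads + 1).float()           # h + 1
    return -(rates[:, None, None] * delta) / 2.0         # (heads, L, L)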
# kernel path: runs/run_shard_8/inductor_cache/l4/cl4jzvo5qffjiemdnn5ifpqaii7sona27e2rdso6g4qvmophjbtp.py
# Topologically Sorted Source Nodes: [indexes, eye], Original ATen: [aten.arange, aten.eye]
# Source node to ATen node mapping:
# eye => eq
# indexes => iota
# Graph fragment:
# %iota : [num_users=3] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_7, %iota), kwargs = {})
triton_poi_fused_arange_eye_3 = async_compile.triton('triton_poi_fused_arange_eye_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_eye_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_eye_3(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = x0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
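# The kernel above materialises a boolean identity matrix purely from the
# flat index: eye[i, j] = (i == j). The softmax kernels below load it as
# the mask that forces scores to -100. Eager-mode sketch (our illustrative
# helper):
def _eye_mask_reference(n=4):
    idx = torch.arange(n)
    return idx[:, None] == idx[None, :]  # bool (n, n), True on the diagonal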
# kernel path: runs/run_shard_8/inductor_cache/7g/c7gkavy5nnzth3x2wed3jp7uobijmye3esbvood6g5vxobbnhpde.py
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weights => amax
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_16, [2], True), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
tmp0 = tl.load(in_ptr0 + (y0), ymask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x1 + (64*y0)), xmask & ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (4 + y0), ymask, eviction_policy='evict_last').to(tl.int1)
tmp11 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr3 + (16 + x1 + (64*y0)), xmask & ymask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (8 + y0), ymask, eviction_policy='evict_last').to(tl.int1)
tmp19 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr3 + (32 + x1 + (64*y0)), xmask & ymask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (12 + y0), ymask, eviction_policy='evict_last').to(tl.int1)
tmp27 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + (48 + x1 + (64*y0)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 + tmp6
tmp8 = -100.0
tmp9 = tl.where(tmp0, tmp8, tmp7)
tmp12 = tmp11 * tmp2
tmp13 = tmp12 * tmp4
tmp15 = tmp13 + tmp14
tmp16 = tl.where(tmp10, tmp8, tmp15)
tmp17 = triton_helpers.maximum(tmp9, tmp16)
tmp20 = tmp19 * tmp2
tmp21 = tmp20 * tmp4
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp18, tmp8, tmp23)
tmp25 = triton_helpers.maximum(tmp17, tmp24)
tmp28 = tmp27 * tmp2
tmp29 = tmp28 * tmp4
tmp31 = tmp29 + tmp30
tmp32 = tl.where(tmp26, tmp8, tmp31)
tmp33 = triton_helpers.maximum(tmp25, tmp32)
tl.store(out_ptr0 + (x1 + (16*y0)), tmp33, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/hv/chvm4rmgmfzfa5n4luffgu22fvs76me3gexbqkdwxgjnuwui7bec.py
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weights => exp, sub_1
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_16, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
x2 = (xindex // 4)
y0 = yindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x2 + (4*y0)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1 + (4*y0)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (y0 + (16*x2) + (64*x1)), xmask & ymask)
tmp10 = tl.load(in_ptr4 + (y0 + (16*x1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 + tmp6
tmp8 = -100.0
tmp9 = tl.where(tmp0, tmp8, tmp7)
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tl.store(out_ptr0 + (x3 + (16*y0)), tmp12, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ry/cryfskax4soldfj4gnwnzu4snp73auwk43upvbh6hu5xizgjdsfb.py
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weights => div_3, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
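# triton_poi_fused__softmax_4/5/6 above implement one masked softmax in
# three passes over the attention scores: (4) scale the q*k scores, add the
# decay bias, force masked positions to -100 and take the row max;
# (5) exponentiate the shifted scores; (6) normalise by the row sum.
# Eager-mode sketch of the same pipeline (our illustrative helper):
def _masked_softmax_reference(scores, mask):
    # scores: (..., L) float; mask: broadcastable bool, True = masked out
    scores = scores.masked_fill(mask, -100.0)
    scores = scores - scores.amax(dim=-1, keepdim=True)
    e = scores.exp()
    return e / e.sum(dim=-1, keepdim=True)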
# kernel path: runs/run_shard_8/inductor_cache/jz/cjzjc4lixbxljbjfuzbxciq4awrywqb4wm75p5rsfj2upu4kymlo.py
# Topologically Sorted Source Nodes: [conv1d_4, add], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# add => add_3
# conv1d_4 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_23, %primals_10, %primals_11, [1], [0], [1], False, [0], 1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %convolution_4), kwargs = {})
triton_poi_fused_add_convolution_7 = async_compile.triton('triton_poi_fused_add_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_out_ptr0 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
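# Parameter layout for call() (it mirrors the weight wiring in
# LocalStateNew.forward further below):
#   primals_1      input x, shape (B, C, T) = (4, 4, 4)
#   primals_2/3    content 1x1 conv weight / bias
#   primals_4/5    query 1x1 conv weight / bias
#   primals_6/7    query_decay 1x1 conv weight / bias (heads * ndecay = 16 outputs)
#   primals_8/9    key 1x1 conv weight / bias
#   primals_10/11  proj 1x1 conv weight / bias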
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (16, 4, 1), (4, 1, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(primals_1, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 4), (64, 4, 1))
buf5 = buf4; del buf4 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d_2, sigmoid, decay_q_1], Original ATen: [aten.convolution, aten.sigmoid, aten.div]
triton_poi_fused_convolution_div_sigmoid_1.run(buf5, primals_7, buf7, 256, grid=grid(256), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [delta, neg, abs_1, mul, decay_kernel], Original ATen: [aten.sub, aten.neg, aten.abs, aten.mul, aten.div]
triton_poi_fused_abs_div_mul_neg_sub_2.run(buf6, 64, grid=grid(64), stream=stream0)
buf8 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (1, 4, 16), 0), reinterpret_tensor(buf7, (4, 4, 16), (1, 4, 16), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [indexes, eye], Original ATen: [aten.arange, aten.eye]
triton_poi_fused_arange_eye_3.run(buf9, 16, grid=grid(16), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 1, 4), (4, 1, 64, 16), torch.float32)
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf9, buf3, buf1, buf8, buf10, 4, 16, grid=grid(4, 16), stream=stream0)
buf11 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf9, buf3, buf1, buf8, buf10, buf11, 16, 16, grid=grid(16, 16), stream=stream0)
buf12 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf11, buf12, 256, grid=grid(256), stream=stream0)
del buf11
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(primals_1, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4), (16, 4, 1))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf14, primals_9, 64, grid=grid(64), stream=stream0)
del primals_9
buf15 = reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf12, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 0), 0), out=buf15)
# Topologically Sorted Source Nodes: [conv1d_4], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0), primals_10, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf16, (4, 4, 4), (16, 4, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [conv1d_4, add], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_7.run(buf17, primals_1, primals_11, 64, grid=grid(64), stream=stream0)
del primals_11
return (buf17, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf9, buf12, reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf14, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf6, (4, 4, 4), (1, 16, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
from torch import nn
class LocalState(nn.Module):
"""Local state allows to have attention based only on data (no positional embedding),
but while setting a constraint on the time window (e.g. decaying penalty term).
Also a failed experiments with trying to provide some frequency based attention.
"""
def __init__(self, channels: 'int', heads: 'int'=4, nfreqs: 'int'=0,
ndecay: 'int'=4):
super().__init__()
assert channels % heads == 0, (channels, heads)
self.heads = heads
self.nfreqs = nfreqs
self.ndecay = ndecay
self.content = nn.Conv1d(channels, channels, 1)
self.query = nn.Conv1d(channels, channels, 1)
self.key = nn.Conv1d(channels, channels, 1)
if nfreqs:
self.query_freqs = nn.Conv1d(channels, heads * nfreqs, 1)
if ndecay:
self.query_decay = nn.Conv1d(channels, heads * ndecay, 1)
self.query_decay.weight.data *= 0.01
assert self.query_decay.bias is not None
self.query_decay.bias.data[:] = -2
self.proj = nn.Conv1d(channels + heads * nfreqs, channels, 1)
def forward(self, x):
B, _C, T = x.shape
heads = self.heads
indexes = torch.arange(T, device=x.device, dtype=x.dtype)
delta = indexes[:, None] - indexes[None, :]
queries = self.query(x).view(B, heads, -1, T)
keys = self.key(x).view(B, heads, -1, T)
dots = torch.einsum('bhct,bhcs->bhts', keys, queries)
dots /= keys.shape[2] ** 0.5
if self.nfreqs:
periods = torch.arange(1, self.nfreqs + 1, device=x.device,
dtype=x.dtype)
freq_kernel = torch.cos(2 * math.pi * delta / periods.view(-1,
1, 1))
freq_q = self.query_freqs(x).view(B, heads, -1, T
) / self.nfreqs ** 0.5
dots += torch.einsum('fts,bhfs->bhts', freq_kernel, freq_q)
if self.ndecay:
decays = torch.arange(1, self.ndecay + 1, device=x.device,
dtype=x.dtype)
decay_q = self.query_decay(x).view(B, heads, -1, T)
decay_q = torch.sigmoid(decay_q) / 2
decay_kernel = -decays.view(-1, 1, 1) * delta.abs(
) / self.ndecay ** 0.5
dots += torch.einsum('fts,bhfs->bhts', decay_kernel, decay_q)
dots.masked_fill_(torch.eye(T, device=dots.device, dtype=torch.bool
), -100)
weights = torch.softmax(dots, dim=2)
content = self.content(x).view(B, heads, -1, T)
result = torch.einsum('bhts,bhct->bhcs', weights, content)
if self.nfreqs:
time_sig = torch.einsum('bhts,fts->bhfs', weights, freq_kernel)
result = torch.cat([result, time_sig], 2)
result = result.reshape(B, -1, T)
return x + self.proj(result)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
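# --- Hedged usage sketch (illustrative; not part of the original repo) ---
# Runs LocalState on the shapes produced by the helpers above: channels=4,
# so x has shape (batch=4, channels=4, time=4). The residual connection
# x + self.proj(result) preserves the input shape.
if __name__ == '__main__':
    module = LocalState(*get_init_inputs()[0], **get_init_inputs()[1])
    x = get_inputs()[0]
    out = module(x)
    assert out.shape == x.shape, (out.shape, x.shape)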
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_div_sigmoid_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp5, xmask)
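# Note: the kernel below materializes LocalState's decay penalty table,
# decay_kernel[f, t, s] = -(f + 1) * |t - s| / sqrt(ndecay) with ndecay = 4
# (hence the constant 0.5 factor); the index mapping x0 -> s, x1 -> t,
# x2 -> f is inferred from the index decomposition in the kernel body.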
@triton.jit
def triton_poi_fused_abs_div_mul_neg_sub_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = -1 * tl_math.abs(x0 + -1 * x1) + -1 * x2 * tl_math.abs(x0 + -1 * x1)
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_poi_fused_arange_eye_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = x0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
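# Note: the next three kernels implement a numerically stable softmax over the
# time axis (dim=2 of `dots`): _softmax_4 takes the row-wise max after adding
# the scaled dot products, the decay penalty and the -100 diagonal mask,
# _softmax_5 computes exp(x - max), and _softmax_6 normalizes by the sum of
# exponentials.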
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
    tmp0 = tl.load(in_ptr0 + y0, ymask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr3 + (x1 + 64 * y0), xmask & ymask, eviction_policy
='evict_last')
tmp10 = tl.load(in_ptr0 + (4 + y0), ymask, eviction_policy='evict_last'
).to(tl.int1)
tmp11 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr3 + (16 + x1 + 64 * y0), xmask & ymask,
eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (8 + y0), ymask, eviction_policy='evict_last'
).to(tl.int1)
tmp19 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr3 + (32 + x1 + 64 * y0), xmask & ymask,
eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (12 + y0), ymask, eviction_policy='evict_last'
).to(tl.int1)
tmp27 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp30 = tl.load(in_ptr3 + (48 + x1 + 64 * y0), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 + tmp6
tmp8 = -100.0
tmp9 = tl.where(tmp0, tmp8, tmp7)
tmp12 = tmp11 * tmp2
tmp13 = tmp12 * tmp4
tmp15 = tmp13 + tmp14
tmp16 = tl.where(tmp10, tmp8, tmp15)
tmp17 = triton_helpers.maximum(tmp9, tmp16)
tmp20 = tmp19 * tmp2
tmp21 = tmp20 * tmp4
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp18, tmp8, tmp23)
tmp25 = triton_helpers.maximum(tmp17, tmp24)
tmp28 = tmp27 * tmp2
tmp29 = tmp28 * tmp4
tmp31 = tmp29 + tmp30
tmp32 = tl.where(tmp26, tmp8, tmp31)
tmp33 = triton_helpers.maximum(tmp25, tmp32)
tl.store(out_ptr0 + (x1 + 16 * y0), tmp33, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
x2 = xindex // 4
y0 = yindex
x1 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y0), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + (x1 + 4 * y0), xmask & ymask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr3 + (y0 + 16 * x2 + 64 * x1), xmask & ymask)
tmp10 = tl.load(in_ptr4 + (y0 + 16 * x1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp7 = tmp5 + tmp6
tmp8 = -100.0
tmp9 = tl.where(tmp0, tmp8, tmp7)
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tl.store(out_ptr0 + (x3 + 16 * y0), tmp12, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_convolution_7(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (16, 4, 1), (4, 1, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_0[grid(64)](buf3, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(primals_1, primals_6, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 4), (64, 4, 1))
buf5 = buf4
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_div_sigmoid_1[grid(256)](buf5,
primals_7, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_abs_div_mul_neg_sub_2[grid(64)](buf6, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (1, 4, 16),
0), reinterpret_tensor(buf7, (4, 4, 16), (1, 4, 16), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_arange_eye_3[grid(16)](buf9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1, 4), (4, 1, 64, 16), torch.float32)
triton_poi_fused__softmax_4[grid(4, 16)](buf9, buf3, buf1, buf8,
buf10, 4, 16, XBLOCK=8, YBLOCK=4, num_warps=1, num_stages=1)
buf11 = buf7
del buf7
triton_poi_fused__softmax_5[grid(16, 16)](buf9, buf3, buf1, buf8,
buf10, buf11, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4,
num_stages=1)
buf12 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__softmax_6[grid(256)](buf11, buf12, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf11
buf13 = extern_kernels.convolution(primals_1, primals_8, stride=(1,
), padding=(0,), dilation=(1,), transposed=False,
output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 4), (16, 4, 1))
buf14 = buf13
del buf13
triton_poi_fused_convolution_0[grid(64)](buf14, primals_9, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf15 = reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 1), 0)
del buf10
extern_kernels.bmm(reinterpret_tensor(buf12, (16, 4, 4), (16, 1, 4),
0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 0), 0), out=buf15)
buf16 = extern_kernels.convolution(reinterpret_tensor(buf15, (4, 4,
4), (16, 4, 1), 0), primals_10, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf16, (4, 4, 4), (16, 4, 1))
buf17 = buf16
del buf16
triton_poi_fused_add_convolution_7[grid(64)](buf17, primals_1,
primals_11, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
return (buf17, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_10, buf1, buf3, buf5, buf9, buf12, reinterpret_tensor(buf15,
(4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf14, (16, 1, 4), (4,
4, 1), 0), reinterpret_tensor(buf6, (4, 4, 4), (1, 16, 4), 0))
class LocalStateNew(nn.Module):
"""Local state allows to have attention based only on data (no positional embedding),
but while setting a constraint on the time window (e.g. decaying penalty term).
Also a failed experiments with trying to provide some frequency based attention.
"""
def __init__(self, channels: 'int', heads: 'int'=4, nfreqs: 'int'=0,
ndecay: 'int'=4):
super().__init__()
assert channels % heads == 0, (channels, heads)
self.heads = heads
self.nfreqs = nfreqs
self.ndecay = ndecay
self.content = nn.Conv1d(channels, channels, 1)
self.query = nn.Conv1d(channels, channels, 1)
self.key = nn.Conv1d(channels, channels, 1)
if nfreqs:
self.query_freqs = nn.Conv1d(channels, heads * nfreqs, 1)
if ndecay:
self.query_decay = nn.Conv1d(channels, heads * ndecay, 1)
self.query_decay.weight.data *= 0.01
assert self.query_decay.bias is not None
self.query_decay.bias.data[:] = -2
self.proj = nn.Conv1d(channels + heads * nfreqs, channels, 1)
def forward(self, input_0):
primals_2 = self.content.weight
primals_3 = self.content.bias
primals_4 = self.query.weight
primals_5 = self.query.bias
primals_8 = self.key.weight
primals_9 = self.key.bias
primals_6 = self.query_decay.weight
primals_7 = self.query_decay.bias
primals_10 = self.proj.weight
primals_11 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
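# --- Hedged parity sketch (illustrative; assumes a CUDA device and that the
# eager LocalState class from the reference listing above is in scope) ---
# LocalStateNew replays the same traced graph through call() with fused
# kernels, so with identical weights and inputs it should agree with the
# eager module up to float32 rounding.
def _check_parity():
    torch.manual_seed(0)
    ref = LocalState(channels=4).cuda()      # hypothetical: eager reference module
    fast = LocalStateNew(channels=4).cuda()
    fast.load_state_dict(ref.state_dict())   # same submodule names, so keys match
    x = torch.rand(4, 4, 4, device='cuda')
    torch.testing.assert_close(ref(x), fast(x), rtol=1e-4, atol=1e-4)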
|
xvdp/demucs
|
LocalState
| false | 11,089 |
[
"MIT"
] | 0 |
0a5e3b72c6388801cf0086c2b84d09f6d73c389c
|
https://github.com/xvdp/demucs/tree/0a5e3b72c6388801cf0086c2b84d09f6d73c389c
|
Fcn8s
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
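# Note: the first twelve kernels below carry empty source-node mappings; they
# appear to repack convolution weights and the input image into a
# channels-last layout (e.g. OIHW -> OHWI) ahead of the convolutions, with
# only the tensor extents differing between them.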
# kernel path: runs/run_shard_8/inductor_cache/f7/cf7tayhctr3m6ezk7xezotpdlc5h4drokdkbz4vy2pfkbdxnmn4q.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xq/cxq75w43anllid5ys7ss3yyizuoeph3vvaqlvm5lo434hrywtyle.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/32/c32xiwptfqtyhbnde262mvq5tzywzo6zquurttkv7sztqnze6yni.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/jj/cjjz4tpbucpuc3faa2ky32crfwhb5fbnssd6o2yfkgdcjg2acfmo.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/tg/ctgdsxjd3rciejxtjvi3y2w5fmmggh5lm3mivuygvkdzeb3zulmc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/e7/ce7jqsdrj5poslb2hpufqd2wdux5xiab5n2auqal3ztzvkzrmnzl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 131072
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ks/ckso6iiq5yfqfxmx7ilr6ufrmz6mlkiy75pexzhyf3ierq4pu3zl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 262144
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/4i/c4islqctnux7quywor4ljttjc6krtgvecvzfsjd2pvp4i6z2bufb.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_9 = async_compile.triton('triton_poi_fused_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2097152
xnumel = 49
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (25088*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/bf/cbfh7odtdwifwagwg5xc6g2vp55rb3wqj5vi7sq64lvbzbphjjy6.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_10 = async_compile.triton('triton_poi_fused_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 441
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 21
y1 = (yindex // 21)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (21*x2) + (336*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/v4/cv4mtrltl7xbiw7xp7gyevjrx2oo7z4atzenrrhwbng7yc3fjunc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_11 = async_compile.triton('triton_poi_fused_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 256], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 441
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 21
y1 = (yindex // 21)
tmp0 = tl.load(in_ptr0 + (x2 + (256*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (21*x2) + (5376*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/cp/ccp4ojszpovg6la422usrylat5keq24pc6vzzggj6xjcgfrzhcxg.py
# Topologically Sorted Source Nodes: [conv2d, h], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# h => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [100, 100], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[33554432],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 17572864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xz/cxzlno4gfs3kc72eat6xpiu6xn5dr6yvc2opm5zd7zvxg7ggvnie.py
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# h_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
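# Note: this kernel fuses the 2x2 / stride-2 max pool with argmax tracking in
# the channels-last layout: it loads the four window neighbours (right, down,
# down-right offsets), keeps the running maximum, and stores the winning
# position as an int8 index in 0-3 (presumably for the backward pass).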
triton_poi_fused_max_pool2d_with_indices_13 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4393216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 131
x2 = (xindex // 8384)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (33536*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (33536*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (16768 + x0 + (128*x1) + (33536*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (16832 + x0 + (128*x1) + (33536*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
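
# For reference: a 2x2, stride-2 max pool (262 -> 131, exact) that also records
# which of the four window taps won as an int8 code (0=top-left, 1=top-right,
# 2=bottom-left, 3=bottom-right). Roughly the low-memory counterpart of the
# eager call sketched below, whose indices are flat spatial offsets rather than
# these window-local codes; names are illustrative only:
def _maxpool_reference(x):
    return torch.nn.functional.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
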
# kernel path: runs/run_shard_8/inductor_cache/7f/c7f3npybiepto4pskonp4rrflyi2drrmu7gy7zqiboknduhfvsfv.py
# Topologically Sorted Source Nodes: [conv2d_2, h_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# h_3 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_14 = async_compile.triton('triton_poi_fused_convolution_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
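# Same fused bias-add + ReLU pattern as triton_poi_fused_convolution_relu_12,
# here for 128-channel feature maps (x0 = xindex % 128 is the channel).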
# kernel path: runs/run_shard_8/inductor_cache/6v/c6vh6fohzdliwr7kxmm7qx6hmyvozyr5aky4loctm53mwljdb23x.py
# Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# h_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_15 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2230272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 8448) % 66
x1 = (xindex // 128) % 66
x0 = xindex % 128
x3 = (xindex // 557568)
x6 = xindex
tmp0 = 2*x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 131, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2*x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp10, other=float("-inf"))
tmp12 = 1 + (2*x1)
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x2)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (16768 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (16896 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp26, other=float("-inf"))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + (x6), tmp28, None)
tl.store(out_ptr1 + (x6), tmp38, None)
''', device_str='cuda')
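# The same 2x2/stride-2 max pool with indices, but 131 is odd, so the output is
# 66 (ceil_mode behaviour) and the right/bottom window taps can fall outside
# the input. The bounds predicates (tmp4, tmp8, ...) mask those loads and
# substitute -inf, so an out-of-range tap can never win the max. The eager
# counterpart would pass ceil_mode=True to max_pool2d.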
# kernel path: runs/run_shard_8/inductor_cache/t7/ct7w7cw5mqsq4ea34flessxkmsh4ej3cv3n6smkzfa27walntfeh.py
# Topologically Sorted Source Nodes: [conv2d_4, h_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# h_6 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_16 = async_compile.triton('triton_poi_fused_convolution_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4460544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
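# Bias-add + ReLU at 256 channels. Note xmask is a constant-true mask here:
# Inductor has determined the bounds check is unnecessary for this numel, so
# every launched element is loaded and stored unconditionally.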
# kernel path: runs/run_shard_8/inductor_cache/d2/cd2lbbktrlinq53ce3g2bvhqixfzcynec4yh52pbb5no5nvmictc.py
# Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# h_9 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_17 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = (xindex // 256) % 33
x2 = (xindex // 8448)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (33792*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (33792*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (16896 + x0 + (512*x1) + (33792*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (17152 + x0 + (512*x1) + (33792*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
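# 2x2/stride-2 max pool with indices, 66 -> 33 (exact division, so no boundary
# masking is needed; compare triton_poi_fused_max_pool2d_with_indices_15).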
# kernel path: runs/run_shard_8/inductor_cache/ka/ckakcvtkfyvarj6bygevbxelipfntuuwg3fl65cadqwzpxizcl7j.py
# Topologically Sorted Source Nodes: [conv2d_7, h_10], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# h_10 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
triton_poi_fused_convolution_relu_18 = async_compile.triton('triton_poi_fused_convolution_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2230272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
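# Bias-add + ReLU at 512 channels, same pattern as the kernels above.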
# kernel path: runs/run_shard_8/inductor_cache/7s/c7sk5lcaepj3olmug655q7hxtrgrqziu4e5f27yx7ka6vbwluk3s.py
# Topologically Sorted Source Nodes: [h_13], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# h_13 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_19 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 591872
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 8704) % 17
x1 = (xindex // 512) % 17
x0 = xindex % 512
x3 = (xindex // 147968)
x6 = xindex
tmp0 = 2*x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 33, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2*x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp10, other=float("-inf"))
tmp12 = 1 + (2*x1)
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (512 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x2)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (16896 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (17408 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp26, other=float("-inf"))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + (x6), tmp28, None)
tl.store(out_ptr1 + (x6), tmp38, None)
''', device_str='cuda')
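# Boundary-masked 2x2/stride-2 max pool with indices, 33 -> 17 (ceil_mode
# again: 33 is odd, so -inf padding guards the last row/column of windows).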
# kernel path: runs/run_shard_8/inductor_cache/hx/chxwibttvyopiavs7og2i665mglcnendceewlhrfkqxdqprsj4r4.py
# Topologically Sorted Source Nodes: [conv2d_10, h_14], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_10 => convolution_10
# h_14 => relu_10
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
triton_poi_fused_convolution_relu_20 = async_compile.triton('triton_poi_fused_convolution_relu_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 591872
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
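# Bias-add + ReLU at 512 channels for the fifth conv block.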
# kernel path: runs/run_shard_8/inductor_cache/7u/c7usmy4jmwhqvoypqaj7minodxefdgyuup7zbx3o5sdav22vppzn.py
# Topologically Sorted Source Nodes: [h_17], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# h_17 => getitem_8, getitem_9
# Graph fragment:
# %getitem_8 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 0), kwargs = {})
# %getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_21 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_21(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 4608) % 9
x1 = (xindex // 512) % 9
x0 = xindex % 512
x3 = (xindex // 41472)
x6 = xindex
tmp0 = 2*x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 17, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2*x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp10, other=float("-inf"))
tmp12 = 1 + (2*x1)
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (512 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x2)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (8704 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (9216 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp26, other=float("-inf"))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + (x6), tmp28, None)
tl.store(out_ptr1 + (x6), tmp38, None)
''', device_str='cuda')
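# Boundary-masked 2x2/stride-2 max pool with indices, 17 -> 9 (ceil_mode).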
# kernel path: runs/run_shard_8/inductor_cache/vf/cvf6cbxucccgtdj2kshu7xqtxofuulnihdlcxxkpyduukdvzdwro.py
# Topologically Sorted Source Nodes: [conv2d_13, h_18], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_13 => convolution_13
# h_18 => relu_13
# Graph fragment:
# %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_8, %primals_28, %primals_29, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {})
triton_poi_fused_convolution_relu_22 = async_compile.triton('triton_poi_fused_convolution_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_22', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
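# Bias-add + ReLU at 4096 channels, serving the fully-connected layers recast
# as convolutions (the 7x7 512->4096 and 1x1 4096->4096 convs launched below).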
# kernel path: runs/run_shard_8/inductor_cache/wk/cwkcdwwxdhhshevop452olzqo75rcfp3lilqps5ltgksqjd4fdkm.py
# Topologically Sorted Source Nodes: [h_22], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# h_22 => convolution_15
# Graph fragment:
# %convolution_15 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_14, %primals_32, %primals_33, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_23 = async_compile.triton('triton_poi_fused_convolution_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_23', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 756
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 21
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
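# Bias add only, with no ReLU: this finishes the 21-channel 1x1 score
# convolution, whose raw logits feed the transposed-conv upsampling path.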
# kernel path: runs/run_shard_8/inductor_cache/la/cla2beqdcgg4snq3yqcn3dqhkvstlp4ldskajxxn2q4oqykyeqp6.py
# Topologically Sorted Source Nodes: [h_23, h_26], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# h_23 => convolution_16
# h_26 => add
# Graph fragment:
# %convolution_16 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_15, %primals_34, %primals_35, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_16, %slice_4), kwargs = {})
triton_poi_fused_add_convolution_24 = async_compile.triton('triton_poi_fused_add_convolution_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_24', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_24(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 5376
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 21
x2 = (xindex // 168) % 8
x3 = (xindex // 1344)
x5 = xindex % 168
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1890 + x5 + (357*x2) + (6069*x3)), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
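
# For reference: this kernel fuses two bias adds with the first skip addition.
# tmp2 is the 2x-upsampled score map plus its deconv bias; tmp5 is the pool4
# score plus its bias, read through a static crop: the load offset
# 1890 = 5*357 + 5*21 starts at (h, w) = (5, 5) of the 17x17 map, aligning an
# 8x8 window with the 8x8 upsampled map. An illustrative eager sketch:
def _fcn_skip_add_pool4_reference(upscore2, score_pool4):
    return upscore2 + score_pool4[:, :, 5:13, 5:13]
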
# kernel path: runs/run_shard_8/inductor_cache/u4/cu4ctwx4xkgoycqbox7kboa3g7ncxnuxmh5aqjwkrc7qhui2uzzl.py
# Topologically Sorted Source Nodes: [h_30], Original ATen: [aten.add]
# Source node to ATen node mapping:
# h_30 => add_1
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_18, %slice_8), kwargs = {})
triton_poi_fused_add_25 = async_compile.triton('triton_poi_fused_add_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_25', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_25(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 27216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = (xindex // 378) % 18
x3 = (xindex // 6804)
x5 = xindex % 378
x0 = xindex % 21
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (6426 + x5 + (693*x2) + (22869*x3)), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
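
# For reference: the second skip addition. The 18x18 upsampled map is summed
# with a cropped, bias-added pool3 score; the load offset 6426 = 9*693 + 9*21
# starts at (h, w) = (9, 9) of the 33x33 map. An illustrative eager sketch:
def _fcn_skip_add_pool3_reference(upscore_pool4, score_pool3):
    return upscore_pool4 + score_pool3[:, :, 9:27, 9:27]
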
# kernel path: runs/run_shard_8/inductor_cache/2t/c2ttncwunm72kiwhxkdewtelgwuxlj6ylo2cpsy2rbgxqqounx46.py
# Topologically Sorted Source Nodes: [h_32], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# h_32 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_12,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_26 = async_compile.triton('triton_poi_fused_clone_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_26(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 84
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex % 64
x3 = (xindex // 64)
y0 = yindex % 21
y1 = (yindex // 21)
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (99603 + y0 + (21*x2) + (3192*x3) + (485184*y1)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5 + (4096*y4)), tmp0, ymask)
''', device_str='cuda')
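
# For reference: the final crop-and-copy. The 8x-upsampled 152x152 logits are
# cut back to the 64x64 input resolution and made contiguous NCHW in one pass;
# the load offset 99603 = 31*3192 + 31*21 starts at (h, w) = (31, 31). The
# fixed crop offsets 5, 9 and 31 seen in these kernels match the FCN-8s
# architecture, which this graph appears to implement over a VGG-16 backbone.
# An illustrative eager sketch:
def _fcn_final_crop_reference(upscore8):
    return upscore8[:, :, 31:95, 31:95].contiguous()
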
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512, ), (1, ))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512, ), (1, ))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512, ), (1, ))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512, ), (1, ))
assert_size_stride(primals_28, (4096, 512, 7, 7), (25088, 49, 7, 1))
assert_size_stride(primals_29, (4096, ), (1, ))
assert_size_stride(primals_30, (4096, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_31, (4096, ), (1, ))
assert_size_stride(primals_32, (21, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_33, (21, ), (1, ))
assert_size_stride(primals_34, (21, 21, 4, 4), (336, 16, 4, 1))
assert_size_stride(primals_35, (21, ), (1, ))
assert_size_stride(primals_36, (21, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_37, (21, ), (1, ))
assert_size_stride(primals_38, (21, 21, 4, 4), (336, 16, 4, 1))
assert_size_stride(primals_39, (21, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_40, (21, ), (1, ))
assert_size_stride(primals_41, (21, 21, 16, 16), (5376, 256, 16, 1))
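# The parameter shapes above trace out a VGG-16 backbone (thirteen 3x3 convs
# across 64/128/256/512-channel blocks), fc6/fc7 recast as 7x7 and 1x1
# convolutions with 4096 channels, and 21-channel score/upscore heads with 4x4
# and 16x16 transposed-conv kernels; 21 is presumably the PASCAL VOC class
# count (20 classes plus background).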
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 192, 9, grid=grid(192, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 4096, 9, grid=grid(4096, 9), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0)
del primals_8
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_10, buf5, 32768, 9, grid=grid(32768, 9), stream=stream0)
del primals_10
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_12, buf6, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_12
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_14, buf7, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_14
buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_7.run(primals_16, buf8, 131072, 9, grid=grid(131072, 9), stream=stream0)
del primals_16
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_18, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_18
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_20, buf10, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_20
buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_22, buf11, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_22
buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_24, buf12, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_24
buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_26, buf13, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_26
buf14 = empty_strided_cuda((4096, 512, 7, 7), (25088, 1, 3584, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_9.run(primals_28, buf14, 2097152, 49, grid=grid(2097152, 49), stream=stream0)
del primals_28
buf15 = empty_strided_cuda((21, 21, 4, 4), (336, 1, 84, 21), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_10.run(primals_34, buf15, 441, 16, grid=grid(441, 16), stream=stream0)
del primals_34
buf16 = empty_strided_cuda((21, 21, 4, 4), (336, 1, 84, 21), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_10.run(primals_38, buf16, 441, 16, grid=grid(441, 16), stream=stream0)
del primals_38
buf17 = empty_strided_cuda((21, 21, 16, 16), (5376, 1, 336, 21), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_11.run(primals_41, buf17, 441, 256, grid=grid(441, 256), stream=stream0)
del primals_41
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(100, 100), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 262, 262), (4393216, 1, 16768, 64))
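# The unusual padding=(100, 100) on this first conv is the standard FCN input
# padding: 64 + 2*100 - 2 = 262, which keeps the feature map large enough to
# survive the later 7x7 "fc6" convolution (9x9 -> 3x3) and is undone by the
# fixed crops applied further down.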
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [conv2d, h], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_12.run(buf19, primals_2, 17572864, grid=grid(17572864), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 64, 262, 262), (4393216, 1, 16768, 64))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, h_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_12.run(buf21, primals_5, 17572864, grid=grid(17572864), stream=stream0)
del primals_5
buf22 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64), torch.float32)
buf23 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64), torch.int8)
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_13.run(buf21, buf22, buf23, 4393216, grid=grid(4393216), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf22, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, h_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_14.run(buf25, primals_7, 8786432, grid=grid(8786432), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, h_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_14.run(buf27, primals_9, 8786432, grid=grid(8786432), stream=stream0)
del primals_9
buf28 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.float32)
buf29 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.int8)
# Topologically Sorted Source Nodes: [h_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_15.run(buf27, buf28, buf29, 2230272, grid=grid(2230272), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf28, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, h_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf31, primals_11, 4460544, grid=grid(4460544), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf33 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, h_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf33, primals_13, 4460544, grid=grid(4460544), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf33, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, h_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf35, primals_15, 4460544, grid=grid(4460544), stream=stream0)
del primals_15
buf36 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.float32)
buf37 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.int8)
# Topologically Sorted Source Nodes: [h_9], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_17.run(buf35, buf36, buf37, 1115136, grid=grid(1115136), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf36, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf39 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, h_10], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_18.run(buf39, primals_17, 2230272, grid=grid(2230272), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf40 = extern_kernels.convolution(buf39, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf41 = buf40; del buf40 # reuse
        # Topologically Sorted Source Nodes: [conv2d_8, h_11], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_18.run(buf41, primals_19, 2230272, grid=grid(2230272), stream=stream0)
        del primals_19
        # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
        buf42 = extern_kernels.convolution(buf41, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf42, (4, 512, 33, 33), (557568, 1, 16896, 512))
        buf43 = buf42; del buf42 # reuse
        # Topologically Sorted Source Nodes: [conv2d_9, h_12], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_18.run(buf43, primals_21, 2230272, grid=grid(2230272), stream=stream0)
        del primals_21
        buf44 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.float32)
        buf45 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.int8)
        # Topologically Sorted Source Nodes: [h_13], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_19.run(buf43, buf44, buf45, 591872, grid=grid(591872), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
        buf46 = extern_kernels.convolution(buf44, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf46, (4, 512, 17, 17), (147968, 1, 8704, 512))
        buf47 = buf46; del buf46 # reuse
        # Topologically Sorted Source Nodes: [conv2d_10, h_14], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_20.run(buf47, primals_23, 591872, grid=grid(591872), stream=stream0)
        del primals_23
        # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
        buf48 = extern_kernels.convolution(buf47, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf48, (4, 512, 17, 17), (147968, 1, 8704, 512))
        buf49 = buf48; del buf48 # reuse
        # Topologically Sorted Source Nodes: [conv2d_11, h_15], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_20.run(buf49, primals_25, 591872, grid=grid(591872), stream=stream0)
        del primals_25
        # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
        buf50 = extern_kernels.convolution(buf49, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf50, (4, 512, 17, 17), (147968, 1, 8704, 512))
        buf51 = buf50; del buf50 # reuse
        # Topologically Sorted Source Nodes: [conv2d_12, h_16], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_20.run(buf51, primals_27, 591872, grid=grid(591872), stream=stream0)
        del primals_27
        buf52 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.float32)
        buf53 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.int8)
        # Topologically Sorted Source Nodes: [h_17], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_21.run(buf51, buf52, buf53, 165888, grid=grid(165888), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
        buf54 = extern_kernels.convolution(buf52, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf54, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
        buf55 = buf54; del buf54 # reuse
        # Topologically Sorted Source Nodes: [conv2d_13, h_18], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_22.run(buf55, primals_29, 147456, grid=grid(147456), stream=stream0)
        del primals_29
        # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
        buf56 = extern_kernels.convolution(buf55, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf56, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
        buf57 = buf56; del buf56 # reuse
        # Topologically Sorted Source Nodes: [conv2d_14, h_20], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_22.run(buf57, primals_31, 147456, grid=grid(147456), stream=stream0)
        del primals_31
        # Topologically Sorted Source Nodes: [h_22], Original ATen: [aten.convolution]
        buf58 = extern_kernels.convolution(buf57, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf58, (4, 21, 3, 3), (189, 1, 63, 21))
        buf59 = buf58; del buf58 # reuse
        # Topologically Sorted Source Nodes: [h_22], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_23.run(buf59, primals_33, 756, grid=grid(756), stream=stream0)
        del primals_33
        # Topologically Sorted Source Nodes: [h_23], Original ATen: [aten.convolution]
        buf60 = extern_kernels.convolution(buf59, buf15, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf60, (4, 21, 8, 8), (1344, 1, 168, 21))
        # Topologically Sorted Source Nodes: [h_24], Original ATen: [aten.convolution]
        buf61 = extern_kernels.convolution(buf44, primals_36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf61, (4, 21, 17, 17), (6069, 1, 357, 21))
        buf62 = buf60; del buf60 # reuse
        # Topologically Sorted Source Nodes: [h_23, h_26], Original ATen: [aten.convolution, aten.add]
        triton_poi_fused_add_convolution_24.run(buf62, primals_35, buf61, primals_37, 5376, grid=grid(5376), stream=stream0)
        del buf61
        del primals_35
        del primals_37
        # Topologically Sorted Source Nodes: [h_27], Original ATen: [aten.convolution]
        buf63 = extern_kernels.convolution(buf62, buf16, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf63, (4, 21, 18, 18), (6804, 1, 378, 21))
        # Topologically Sorted Source Nodes: [h_28], Original ATen: [aten.convolution]
        buf64 = extern_kernels.convolution(buf36, primals_39, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf64, (4, 21, 33, 33), (22869, 1, 693, 21))
        buf65 = buf63; del buf63 # reuse
        # Topologically Sorted Source Nodes: [h_30], Original ATen: [aten.add]
        triton_poi_fused_add_25.run(buf65, buf64, primals_40, 27216, grid=grid(27216), stream=stream0)
        del buf64
        del primals_40
        # Topologically Sorted Source Nodes: [h_31], Original ATen: [aten.convolution]
        buf66 = extern_kernels.convolution(buf65, buf17, stride=(8, 8), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf66, (4, 21, 152, 152), (485184, 1, 3192, 21))
        buf67 = empty_strided_cuda((4, 21, 64, 64), (86016, 4096, 64, 1), torch.float32)
        # Topologically Sorted Source Nodes: [h_32], Original ATen: [aten.clone]
        triton_poi_fused_clone_26.run(buf66, buf67, 84, 4096, grid=grid(84, 4096), stream=stream0)
        del buf66
    return (buf67, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf14, primals_30, primals_32, buf15, primals_36, buf16, primals_39, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf28, buf29, buf31, buf33, buf35, buf36, buf37, buf39, buf41, buf43, buf44, buf45, buf47, buf49, buf51, buf52, buf53, buf55, buf57, buf59, buf62, buf65, )
def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_28 = rand_strided((4096, 512, 7, 7), (25088, 49, 7, 1), device='cuda:0', dtype=torch.float32)
    primals_29 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_30 = rand_strided((4096, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_31 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_32 = rand_strided((21, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_33 = rand_strided((21, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_34 = rand_strided((21, 21, 4, 4), (336, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_35 = rand_strided((21, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_36 = rand_strided((21, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_37 = rand_strided((21, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_38 = rand_strided((21, 21, 4, 4), (336, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_39 = rand_strided((21, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_40 = rand_strided((21, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_41 = rand_strided((21, 21, 16, 16), (5376, 256, 16, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41])
    return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
|
import torch
import numpy as np
import torch.nn as nn
def _upsampling_weights(in_channels, out_channels, kernel_size):
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) /
factor)
weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
dtype=np.float64)
weight[range(in_channels), range(out_channels), :, :] = filt
return torch.from_numpy(weight).float()
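# Reader's note (worked example, not part of the generated module): for
# kernel_size=4 the code above gives factor=2 and center=1.5, so the 1-D
# profile 1 - |x - 1.5| / 2 over x = 0..3 is [0.25, 0.75, 0.75, 0.25]; filt is
# the outer product of that profile with itself. A quick check:
#
#     w = _upsampling_weights(21, 21, 4)  # shape (21, 21, 4, 4)
#     w[0, 0, 0]  # tensor([0.0625, 0.1875, 0.1875, 0.0625]) = 0.25 * profile
#
# Only in_channel == out_channel entries are filled, so every class channel is
# upsampled independently with the same bilinear kernel.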
class Fcn8s(nn.Module):
def __init__(self, n_class=21):
"""
        Create the FCN-8s network for the given number of classes.
Args:
n_class: The number of semantic classes.
"""
super(Fcn8s, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.relu3_1 = nn.ReLU(inplace=True)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_2 = nn.ReLU(inplace=True)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_3 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.fc6 = nn.Conv2d(512, 4096, 7)
self.relu6 = nn.ReLU(inplace=True)
self.drop6 = nn.Dropout2d()
self.fc7 = nn.Conv2d(4096, 4096, 1)
self.relu7 = nn.ReLU(inplace=True)
self.drop7 = nn.Dropout2d()
self.score_fr = nn.Conv2d(4096, n_class, 1)
self.score_pool3 = nn.Conv2d(256, n_class, 1)
self.score_pool4 = nn.Conv2d(512, n_class, 1)
self.upscore2 = nn.ConvTranspose2d(n_class, n_class, 4, stride=2,
bias=True)
self.upscore8 = nn.ConvTranspose2d(n_class, n_class, 16, stride=8,
bias=False)
        self.upscore_pool4 = nn.ConvTranspose2d(n_class, n_class, 4,
            stride=2, bias=False)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.zero_()
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
assert m.kernel_size[0] == m.kernel_size[1]
                initial_weight = _upsampling_weights(m.in_channels,
                    m.out_channels, m.kernel_size[0])
m.weight.data.copy_(initial_weight)
def forward(self, image):
h = self.relu1_1(self.conv1_1(image))
h = self.relu1_2(self.conv1_2(h))
h = self.pool1(h)
h = self.relu2_1(self.conv2_1(h))
h = self.relu2_2(self.conv2_2(h))
h = self.pool2(h)
h = self.relu3_1(self.conv3_1(h))
h = self.relu3_2(self.conv3_2(h))
h = self.relu3_3(self.conv3_3(h))
h = self.pool3(h)
pool3 = h
h = self.relu4_1(self.conv4_1(h))
h = self.relu4_2(self.conv4_2(h))
h = self.relu4_3(self.conv4_3(h))
h = self.pool4(h)
pool4 = h
h = self.relu5_1(self.conv5_1(h))
h = self.relu5_2(self.conv5_2(h))
h = self.relu5_3(self.conv5_3(h))
h = self.pool5(h)
h = self.relu6(self.fc6(h))
h = self.drop6(h)
h = self.relu7(self.fc7(h))
h = self.drop7(h)
h = self.score_fr(h)
h = self.upscore2(h)
upscore2 = h
h = self.score_pool4(pool4)
h = h[:, :, 5:5 + upscore2.size()[2], 5:5 + upscore2.size()[3]]
score_pool4c = h
h = upscore2 + score_pool4c
h = self.upscore_pool4(h)
upscore_pool4 = h
h = self.score_pool3(pool3)
        h = h[:, :, 9:9 + upscore_pool4.size()[2], 9:9 + upscore_pool4.size()[3]]
score_pool3c = h
h = upscore_pool4 + score_pool3c
h = self.upscore8(h)
        h = h[:, :, 31:31 + image.size()[2], 31:31 + image.size()[3]].contiguous()
return h
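# Reader's note: shape walk-through for the 4x3x64x64 input from get_inputs()
# below, matching the assert_size_stride checks in the compiled wrapper above.
# conv1_1 with padding=100 gives 262x262; pools 1-5 (ceil_mode=True) give 131,
# 66, 33, 17 and 9; the 7x7 fc6 gives 3x3. upscore2 maps 3 -> (3-1)*2 + 4 = 8,
# so the 17x17 pool4 score map is cropped at [5:5+8]; upscore_pool4 maps
# 8 -> 18, so the 33x33 pool3 score map is cropped at [9:9+18]; upscore8 maps
# 18 -> (18-1)*8 + 16 = 152, and the final crop [31:31+64] restores the 64x64
# input resolution.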
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
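# Reader's note: this kernel repacks the conv1_1 weight from its contiguous
# (64, 3, 3, 3) layout, strides (27, 9, 3, 1), into a channels-last buffer
# with strides (27, 1, 9, 3) (see the matching empty_strided_cuda in call()).
# With y0 = input channel, y1 = output channel and x2 the flattened 3x3
# spatial position, the store offset y0 + 3*x2 + 27*y1 makes the input channel
# the fastest-moving axis, which is what the channels-last convolutions below
# expect.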
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 49
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 25088 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 441
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 21
y1 = yindex // 21
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 21 * x2 + 336 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 441
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 21
y1 = yindex // 21
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 21 * x2 + 5376 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 17572864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
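# Reader's note: buf19/buf21 hold conv outputs in channels-last order with 64
# channels, so x0 = xindex % 64 recovers the channel for the per-channel bias
# in in_ptr0. The kernel fuses the bias add and ReLU (maximum with 0) into one
# in-place pass; xnumel = 17572864 = 4 * 64 * 262 * 262, one lane per output
# element.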
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4393216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 131
x2 = xindex // 8384
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 33536 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 33536 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (16768 + x0 + 128 * x1 + 33536 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (16832 + x0 + 128 * x1 + 33536 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
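# Reader's note: this is the 2x2 / stride-2 max pool from 262x262 to 131x131
# in channels-last layout. The four loads are the window corners (+64 is the
# next column, +16768 = 64 * 262 the next row); out_ptr0 receives the max and
# out_ptr1 an int8 code 0-3 for the winning corner -- the compact index tensor
# (buf23 in call()) kept among the outputs saved for the pooling backward.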
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 8448 % 66
x1 = xindex // 128 % 66
x0 = xindex % 128
x3 = xindex // 557568
x6 = xindex
tmp0 = 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 131, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + 256 * x1 + 33536 * x2 + 2196608 * x3),
tmp10, other=float('-inf'))
tmp12 = 1 + 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
x3), tmp16, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x2
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (16768 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
x3), tmp23, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (16896 + x0 + 256 * x1 + 33536 * x2 + 2196608 *
x3), tmp26, other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + x6, tmp28, None)
tl.store(out_ptr1 + x6, tmp38, None)
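# Reader's note: unlike the _13 pool above, this one reduces an odd extent
# (131 -> 66 with ceil_mode=True), so the last window hangs past the edge.
# The tmp0..tmp26 comparisons bound-check each of the four taps against the
# 131-element input, and the masked loads substitute float('-inf') for
# out-of-range taps so they never win the running maximum.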
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = xindex // 256 % 33
x2 = xindex // 8448
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 33792 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 33792 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 33792 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (17152 + x0 + 512 * x1 + 33792 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 8704 % 17
x1 = xindex // 512 % 17
x0 = xindex % 512
x3 = xindex // 147968
x6 = xindex
tmp0 = 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 33, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 33792 * x2 + 557568 * x3),
tmp10, other=float('-inf'))
tmp12 = 1 + 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
x3), tmp16, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x2
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (16896 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
x3), tmp23, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (17408 + x0 + 1024 * x1 + 33792 * x2 + 557568 *
x3), tmp26, other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + x6, tmp28, None)
tl.store(out_ptr1 + x6, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_21(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 4608 % 9
x1 = xindex // 512 % 9
x0 = xindex % 512
x3 = xindex // 41472
x6 = xindex
tmp0 = 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 17, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 17408 * x2 + 147968 * x3),
tmp10, other=float('-inf'))
tmp12 = 1 + 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
x3), tmp16, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x2
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (8704 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
x3), tmp23, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (9216 + x0 + 1024 * x1 + 17408 * x2 + 147968 *
x3), tmp26, other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tl.store(out_ptr0 + x6, tmp28, None)
tl.store(out_ptr1 + x6, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 756
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 21
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_convolution_24(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 5376
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 21
x2 = xindex // 168 % 8
x3 = xindex // 1344
x5 = xindex % 168
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1890 + x5 + 357 * x2 + 6069 * x3), xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x4, tmp6, xmask)
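# Reader's note: this kernel finishes h_26 = (upscore2 output + its bias) +
# (score_pool4 output + its bias)[:, :, 5:13, 5:13] in one pass. buf61 is the
# 17x17 score_pool4 map with strides (6069, 1, 357, 21), so the load offset
# 1890 + x5 + 357*x2 + 6069*x3 is 5*357 + 5*21 = 1890 plus the in-crop
# coordinates (x5 = channel + 21*column, x2 = row, x3 = batch): the [5:5+8]
# crop is folded into the address arithmetic instead of materialising a
# cropped tensor.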
@triton.jit
def triton_poi_fused_add_25(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 27216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = xindex // 378 % 18
x3 = xindex // 6804
x5 = xindex % 378
x0 = xindex % 21
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (6426 + x5 + 693 * x2 + 22869 * x3), xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x4, tmp4, xmask)
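# Reader's note: the same folded-crop pattern one level up. buf64 is the 33x33
# score_pool3 map with strides (22869, 1, 693, 21), and the offset
# 6426 + x5 + 693*x2 + 22869*x3 equals 9*693 + 9*21 = 6426 plus the in-crop
# coordinates, i.e. the [9:9+18] crop of score_pool3 (plus its bias) added in
# place onto the 18x18 upscore_pool4 output.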
@triton.jit
def triton_poi_fused_clone_26(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl
.constexpr, XBLOCK: tl.constexpr):
ynumel = 84
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex % 64
x3 = xindex // 64
y0 = yindex % 21
y1 = yindex // 21
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (99603 + y0 + 21 * x2 + 3192 * x3 + 485184 *
y1), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5 + 4096 * y4), tmp0, ymask)
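# Reader's note: the 152x152 upscore8 output (strides (485184, 1, 3192, 21),
# channels-last) is cropped at [31:31+64] and written out as contiguous NCHW.
# The load offset 99603 = 31*3192 + 31*21 is the crop's top-left corner, and
# the store x5 + 4096*y4 produces the (86016, 4096, 64, 1) strides of buf67 --
# the eager model's final slice + .contiguous(), fused into one copy kernel.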
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
assert_size_stride(primals_28, (4096, 512, 7, 7), (25088, 49, 7, 1))
assert_size_stride(primals_29, (4096,), (1,))
assert_size_stride(primals_30, (4096, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_31, (4096,), (1,))
assert_size_stride(primals_32, (21, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_33, (21,), (1,))
assert_size_stride(primals_34, (21, 21, 4, 4), (336, 16, 4, 1))
assert_size_stride(primals_35, (21,), (1,))
assert_size_stride(primals_36, (21, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_37, (21,), (1,))
assert_size_stride(primals_38, (21, 21, 4, 4), (336, 16, 4, 1))
assert_size_stride(primals_39, (21, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_40, (21,), (1,))
assert_size_stride(primals_41, (21, 21, 16, 16), (5376, 256, 16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_24
buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_26
buf14 = empty_strided_cuda((4096, 512, 7, 7), (25088, 1, 3584, 512),
torch.float32)
triton_poi_fused_9[grid(2097152, 49)](primals_28, buf14, 2097152,
49, XBLOCK=32, YBLOCK=64, num_warps=8, num_stages=1)
del primals_28
buf15 = empty_strided_cuda((21, 21, 4, 4), (336, 1, 84, 21), torch.
float32)
triton_poi_fused_10[grid(441, 16)](primals_34, buf15, 441, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_34
buf16 = empty_strided_cuda((21, 21, 4, 4), (336, 1, 84, 21), torch.
float32)
triton_poi_fused_10[grid(441, 16)](primals_38, buf16, 441, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_38
buf17 = empty_strided_cuda((21, 21, 16, 16), (5376, 1, 336, 21),
torch.float32)
triton_poi_fused_11[grid(441, 256)](primals_41, buf17, 441, 256,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_41
buf18 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(100, 100), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 262, 262), (4393216, 1, 16768, 64))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_12[grid(17572864)](buf19,
primals_2, 17572864, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf20 = extern_kernels.convolution(buf19, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 64, 262, 262), (4393216, 1, 16768, 64))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_12[grid(17572864)](buf21,
primals_5, 17572864, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf22 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64
), torch.float32)
buf23 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64
), torch.int8)
triton_poi_fused_max_pool2d_with_indices_13[grid(4393216)](buf21,
buf22, buf23, 4393216, XBLOCK=512, num_warps=8, num_stages=1)
buf24 = extern_kernels.convolution(buf22, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_14[grid(8786432)](buf25,
primals_7, 8786432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf26 = extern_kernels.convolution(buf25, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 128, 131, 131), (2196608, 1, 16768, 128))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_14[grid(8786432)](buf27,
primals_9, 8786432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf28 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128),
torch.float32)
buf29 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_15[grid(2230272)](buf27,
buf28, buf29, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
buf30 = extern_kernels.convolution(buf28, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_16[grid(4460544)](buf31,
primals_11, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf32 = extern_kernels.convolution(buf31, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf33 = buf32
del buf32
triton_poi_fused_convolution_relu_16[grid(4460544)](buf33,
primals_13, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf34 = extern_kernels.convolution(buf33, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 256, 66, 66), (1115136, 1, 16896, 256))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_16[grid(4460544)](buf35,
primals_15, 4460544, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf36 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256),
torch.float32)
buf37 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_17[grid(1115136)](buf35,
buf36, buf37, 1115136, XBLOCK=1024, num_warps=4, num_stages=1)
buf38 = extern_kernels.convolution(buf36, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_18[grid(2230272)](buf39,
primals_17, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
del primals_17
buf40 = extern_kernels.convolution(buf39, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf41 = buf40
del buf40
triton_poi_fused_convolution_relu_18[grid(2230272)](buf41,
primals_19, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf42 = extern_kernels.convolution(buf41, buf10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 512, 33, 33), (557568, 1, 16896, 512))
buf43 = buf42
del buf42
triton_poi_fused_convolution_relu_18[grid(2230272)](buf43,
primals_21, 2230272, XBLOCK=512, num_warps=8, num_stages=1)
del primals_21
buf44 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512),
torch.float32)
buf45 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_19[grid(591872)](buf43,
buf44, buf45, 591872, XBLOCK=512, num_warps=8, num_stages=1)
buf46 = extern_kernels.convolution(buf44, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf47 = buf46
del buf46
triton_poi_fused_convolution_relu_20[grid(591872)](buf47,
primals_23, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf48 = extern_kernels.convolution(buf47, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf49 = buf48
del buf48
triton_poi_fused_convolution_relu_20[grid(591872)](buf49,
primals_25, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_25
buf50 = extern_kernels.convolution(buf49, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 512, 17, 17), (147968, 1, 8704, 512))
buf51 = buf50
del buf50
triton_poi_fused_convolution_relu_20[grid(591872)](buf51,
primals_27, 591872, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_27
buf52 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512),
torch.float32)
buf53 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_21[grid(165888)](buf51,
buf52, buf53, 165888, XBLOCK=512, num_warps=8, num_stages=1)
buf54 = extern_kernels.convolution(buf52, buf14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
buf55 = buf54
del buf54
triton_poi_fused_convolution_relu_22[grid(147456)](buf55,
primals_29, 147456, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_29
buf56 = extern_kernels.convolution(buf55, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 4096, 3, 3), (36864, 1, 12288, 4096))
buf57 = buf56
del buf56
triton_poi_fused_convolution_relu_22[grid(147456)](buf57,
primals_31, 147456, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_31
buf58 = extern_kernels.convolution(buf57, primals_32, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 21, 3, 3), (189, 1, 63, 21))
buf59 = buf58
del buf58
triton_poi_fused_convolution_23[grid(756)](buf59, primals_33, 756,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_33
buf60 = extern_kernels.convolution(buf59, buf15, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 21, 8, 8), (1344, 1, 168, 21))
buf61 = extern_kernels.convolution(buf44, primals_36, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 21, 17, 17), (6069, 1, 357, 21))
buf62 = buf60
del buf60
triton_poi_fused_add_convolution_24[grid(5376)](buf62, primals_35,
buf61, primals_37, 5376, XBLOCK=128, num_warps=4, num_stages=1)
del buf61
del primals_35
del primals_37
buf63 = extern_kernels.convolution(buf62, buf16, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 21, 18, 18), (6804, 1, 378, 21))
buf64 = extern_kernels.convolution(buf36, primals_39, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 21, 33, 33), (22869, 1, 693, 21))
buf65 = buf63
del buf63
triton_poi_fused_add_25[grid(27216)](buf65, buf64, primals_40,
27216, XBLOCK=256, num_warps=4, num_stages=1)
del buf64
del primals_40
buf66 = extern_kernels.convolution(buf65, buf17, stride=(8, 8),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 21, 152, 152), (485184, 1, 3192, 21))
buf67 = empty_strided_cuda((4, 21, 64, 64), (86016, 4096, 64, 1),
torch.float32)
triton_poi_fused_clone_26[grid(84, 4096)](buf66, buf67, 84, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf66
return (buf67, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8,
buf9, buf10, buf11, buf12, buf13, buf14, primals_30, primals_32,
buf15, primals_36, buf16, primals_39, buf17, buf19, buf21, buf22,
buf23, buf25, buf27, buf28, buf29, buf31, buf33, buf35, buf36,
buf37, buf39, buf41, buf43, buf44, buf45, buf47, buf49, buf51,
buf52, buf53, buf55, buf57, buf59, buf62, buf65)
def _upsampling_weights(in_channels, out_channels, kernel_size):
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) /
factor)
weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
dtype=np.float64)
weight[range(in_channels), range(out_channels), :, :] = filt
return torch.from_numpy(weight).float()
class Fcn8sNew(nn.Module):
def __init__(self, n_class=21):
"""
        Create the FCN-8s network for the given number of classes.
Args:
n_class: The number of semantic classes.
"""
super(Fcn8sNew, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.relu3_1 = nn.ReLU(inplace=True)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_2 = nn.ReLU(inplace=True)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_3 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.fc6 = nn.Conv2d(512, 4096, 7)
self.relu6 = nn.ReLU(inplace=True)
self.drop6 = nn.Dropout2d()
self.fc7 = nn.Conv2d(4096, 4096, 1)
self.relu7 = nn.ReLU(inplace=True)
self.drop7 = nn.Dropout2d()
self.score_fr = nn.Conv2d(4096, n_class, 1)
self.score_pool3 = nn.Conv2d(256, n_class, 1)
self.score_pool4 = nn.Conv2d(512, n_class, 1)
self.upscore2 = nn.ConvTranspose2d(n_class, n_class, 4, stride=2,
bias=True)
self.upscore8 = nn.ConvTranspose2d(n_class, n_class, 16, stride=8,
bias=False)
        self.upscore_pool4 = nn.ConvTranspose2d(n_class, n_class, 4,
            stride=2, bias=False)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.zero_()
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
assert m.kernel_size[0] == m.kernel_size[1]
                initial_weight = _upsampling_weights(m.in_channels,
                    m.out_channels, m.kernel_size[0])
m.weight.data.copy_(initial_weight)
def forward(self, input_0):
primals_1 = self.conv1_1.weight
primals_2 = self.conv1_1.bias
primals_4 = self.conv1_2.weight
primals_5 = self.conv1_2.bias
primals_6 = self.conv2_1.weight
primals_7 = self.conv2_1.bias
primals_8 = self.conv2_2.weight
primals_9 = self.conv2_2.bias
primals_10 = self.conv3_1.weight
primals_11 = self.conv3_1.bias
primals_12 = self.conv3_2.weight
primals_13 = self.conv3_2.bias
primals_14 = self.conv3_3.weight
primals_15 = self.conv3_3.bias
primals_16 = self.conv4_1.weight
primals_17 = self.conv4_1.bias
primals_18 = self.conv4_2.weight
primals_19 = self.conv4_2.bias
primals_20 = self.conv4_3.weight
primals_21 = self.conv4_3.bias
primals_22 = self.conv5_1.weight
primals_23 = self.conv5_1.bias
primals_24 = self.conv5_2.weight
primals_25 = self.conv5_2.bias
primals_26 = self.conv5_3.weight
primals_27 = self.conv5_3.bias
primals_28 = self.fc6.weight
primals_29 = self.fc6.bias
primals_30 = self.fc7.weight
primals_31 = self.fc7.bias
primals_32 = self.score_fr.weight
primals_33 = self.score_fr.bias
        primals_39 = self.score_pool3.weight
        primals_40 = self.score_pool3.bias
        primals_36 = self.score_pool4.weight
        primals_37 = self.score_pool4.bias
        primals_34 = self.upscore2.weight
        primals_35 = self.upscore2.bias
primals_41 = self.upscore8.weight
primals_38 = self.upscore_pool4.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41])
return output[0]
|
lxxue/cil-road-segmentation-2019
|
Fcn8s
| false | 11,090 |
[
"MIT"
] | 0 |
c6477556dc3d6d9c8ed2f2a3f185b4d986a03bb4
|
https://github.com/lxxue/cil-road-segmentation-2019/tree/c6477556dc3d6d9c8ed2f2a3f185b4d986a03bb4
|
Rescale
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/6h/c6hybfmwxtvyolqaeggoto7ue22rzujsr7g7mp27ap4ukdyrgoqw.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 3
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
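# Added note: the input here is a contiguous (4, 3, 4, 4) tensor, so each
# channel spans 16 consecutive elements and x1 = (xindex // 16) % 3 recovers
# the channel index, broadcasting the (1, 3, 1, 1) bias over batch and space.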
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (1, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(arg1_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg1_1, arg0_1, buf0, 192, grid=grid(192), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((1, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class Rescale(nn.Module):
def __init__(self, sign):
super(Rescale, self).__init__()
rgb_mean = 0.4488, 0.4371, 0.404
bias = sign * torch.Tensor(rgb_mean).reshape(1, 3, 1, 1)
self.bias = nn.Parameter(bias, requires_grad=False)
def forward(self, x):
return x + self.bias
def get_inputs():
return [torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [[], {'sign': 4}]
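# Illustrative usage (not part of the original repo): Rescale(-1) subtracts
# the fixed RGB mean and Rescale(1) adds it back, so the two compose to the
# identity:
#   sub_mean, add_mean = Rescale(-1), Rescale(1)
#   x = torch.rand(4, 3, 4, 4)
#   assert torch.allclose(add_mean(sub_mean(x)), x)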
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 3
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (1, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(arg1_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_add_0[grid(192)](arg1_1, arg0_1, buf0, 192, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class RescaleNew(nn.Module):
def __init__(self, sign):
super(RescaleNew, self).__init__()
rgb_mean = 0.4488, 0.4371, 0.404
bias = sign * torch.Tensor(rgb_mean).reshape(1, 3, 1, 1)
self.bias = nn.Parameter(bias, requires_grad=False)
def forward(self, input_0):
arg0_1 = self.bias
arg1_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
| zeta1999/torchSR | Rescale | false | 11,091 | ["MIT"] | 0 | 8f8154486f6c0f09942ccf86cdcbf496e2309d4e | https://github.com/zeta1999/torchSR/tree/8f8154486f6c0f09942ccf86cdcbf496e2309d4e |
TransformerEncoderLayer_MLP
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/65/c65wfo7hcxr53m3y3jckxowtzw6fflb372nkguxrr2txfzcafqxb.py
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# gelu => add_2, erf, mul_2, mul_3, mul_4
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_3,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_2), kwargs = {})
triton_poi_fused_gelu_2 = async_compile.triton('triton_poi_fused_gelu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, None)
''', device_str='cuda')
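# Added note: the kernel above evaluates the exact erf-based GELU,
#   gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))),
# where 0.7071067811865476 is 1/sqrt(2); e.g. gelu(1.0) = 1.0 * Phi(1.0)
# ~= 0.8413, with Phi the standard normal CDF.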
# kernel path: runs/run_shard_8/inductor_cache/b5/cb5kfengtwtso2cktophcv2j56lrz4uz52si4koheb5liewfkvtt.py
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# src_1 => add_3
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %view_3), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2048, 4), (4, 1))
assert_size_stride(primals_5, (2048, ), (1, ))
assert_size_stride(primals_6, (4, 2048), (2048, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 2048), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
triton_poi_fused_gelu_2.run(buf3, buf4, 131072, grid=grid(131072), stream=stream0)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf4, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_6, (2048, 4), (1, 2048), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf6, buf2, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 2048), (2048, 1), 0), primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Linear
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Identity
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
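# Added sanity note: dividing the kept activations by keep_prob makes
# drop_path expectation-preserving, since
#   E[(x / keep_prob) * Bernoulli(keep_prob)] = x.
# With drop_prob=0.5, roughly half the samples in a batch are zeroed and the
# survivors are scaled by 2.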
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class TransformerEncoderLayer_MLP(Module):
"""
Inspired by torch.nn.TransformerEncoderLayer and timm.
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
attention_dropout=0.0, drop_path_rate=0.5, layerscale=0.0,
train_scale=True):
super(TransformerEncoderLayer_MLP, self).__init__()
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout1 = Dropout(dropout)
self.norm1 = LayerNorm(d_model)
self.linear2 = Linear(dim_feedforward, d_model)
self.dropout2 = Dropout(dropout)
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else Identity()
self.activation = F.gelu
self.layerscale = layerscale
if layerscale > 0.0:
if train_scale:
self.gamma = nn.Parameter(layerscale * torch.ones(d_model),
requires_grad=True)
else:
self.gamma = nn.Parameter(layerscale * torch.ones(d_model),
requires_grad=False)
def forward(self, src: 'torch.Tensor', *args, **kwargs) ->torch.Tensor:
src = self.norm1(src)
src2 = self.linear2(self.dropout1(self.activation(self.linear1(src))))
if self.layerscale > 0.0:
src = src + self.drop_path(self.gamma * self.dropout2(src2))
else:
src = src + self.drop_path(self.dropout2(src2))
return src
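# Added structure note: the block normalizes first, then adds the MLP output
# back onto the *normalized* activations:
#   y = norm1(src)
#   src = y + drop_path(dropout2(linear2(dropout1(gelu(linear1(y))))))
# with an optional per-channel LayerScale factor gamma when layerscale > 0.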
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Linear
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Identity
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, None)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2048, 4), (4, 1))
assert_size_stride(primals_5, (2048,), (1,))
assert_size_stride(primals_6, (4, 2048), (2048, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 2048), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.float32)
        triton_poi_fused_gelu_2[grid(131072)](buf3, buf4, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 2048), (2048, 1), 0
), reinterpret_tensor(primals_6, (2048, 4), (1, 2048), 0), out=buf5
)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_3[grid(256)](buf6, buf2, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf3, reinterpret_tensor(buf4, (64, 2048), (2048, 1), 0
), primals_6, primals_4
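# Added note: only buf6 is the module output; the remaining tensors are
# returned because this is a '0_forward' AOT graph, and autograd reuses the
# saved activations and weights in the matching backward graph.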
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class TransformerEncoderLayer_MLPNew(Module):
"""
Inspired by torch.nn.TransformerEncoderLayer and timm.
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
attention_dropout=0.0, drop_path_rate=0.5, layerscale=0.0,
train_scale=True):
super(TransformerEncoderLayer_MLPNew, self).__init__()
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout1 = Dropout(dropout)
self.norm1 = LayerNorm(d_model)
self.linear2 = Linear(dim_feedforward, d_model)
self.dropout2 = Dropout(dropout)
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else Identity()
self.activation = F.gelu
self.layerscale = layerscale
if layerscale > 0.0:
if train_scale:
self.gamma = nn.Parameter(layerscale * torch.ones(d_model),
requires_grad=True)
else:
self.gamma = nn.Parameter(layerscale * torch.ones(d_model),
requires_grad=False)
def forward(self, input_0):
primals_4 = self.linear1.weight
primals_5 = self.linear1.bias
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_6 = self.linear2.weight
primals_7 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| yifanc96/yifanc-DL | TransformerEncoderLayer_MLP | false | 11,092 | ["MIT"] | 0 | 25d56cec776fb151c8f6bcbd997bca94f07f3597 | https://github.com/yifanc96/yifanc-DL/tree/25d56cec776fb151c8f6bcbd997bca94f07f3597 |
OneDilate
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/op/copy2f6gim7y45rs55opaidayqffiwouugrapjdyrrg4gpti57t2.py
# Topologically Sorted Source Nodes: [sub, mul, x], Original ATen: [aten.rsub, aten.mul, aten.replication_pad2d]
# Source node to ATen node mapping:
# mul => mul
# sub => sub
# x => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 0.5), kwargs = {})
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%mul, [None, None, %clamp_max, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %clamp_max_1]), kwargs = {})
triton_poi_fused_mul_replication_pad2d_rsub_0 = async_compile.triton('triton_poi_fused_mul_replication_pad2d_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_replication_pad2d_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_replication_pad2d_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3) % 12
x2 = (xindex // 36) % 12
x3 = (xindex // 432)
x4 = xindex
tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-4) + x2)) + ((-4) + x2) * (((-4) + x2) > (0))))) + (((0) * ((0) >= ((-4) + x2)) + ((-4) + x2) * (((-4) + x2) > (0)))) * ((((0) * ((0) >= ((-4) + x2)) + ((-4) + x2) * (((-4) + x2) > (0)))) < (3)))) + (16*x0) + (48*x3) + ((3) * ((3) <= (((0) * ((0) >= ((-4) + x1)) + ((-4) + x1) * (((-4) + x1) > (0))))) + (((0) * ((0) >= ((-4) + x1)) + ((-4) + x1) * (((-4) + x1) > (0)))) * ((((0) * ((0) >= ((-4) + x1)) + ((-4) + x1) * (((-4) + x1) > (0)))) < (3)))), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
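# Added note: the nested comparisons in the load above are inlined max/min
# selects; each spatial index is mapped through clamp(i - 4, 0, 3), which is
# exactly 4 pixels of replicate padding on every side of the 4x4 input,
# producing the padded 12x12 tensor.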
# kernel path: runs/run_shard_8/inductor_cache/la/clabqwk5mj352dqijgqvyakunfpf3vbxzryf4omtnadkhpucweyp.py
# Topologically Sorted Source Nodes: [sub, mul, x, conv2d], Original ATen: [aten.rsub, aten.mul, aten.replication_pad2d, aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# mul => mul
# sub => sub
# x => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 0.5), kwargs = {})
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%mul, [None, None, %clamp_max, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %clamp_max_1]), kwargs = {})
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %arg1_1, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 3), kwargs = {})
triton_poi_fused_convolution_mul_replication_pad2d_rsub_1 = async_compile.triton('triton_poi_fused_convolution_mul_replication_pad2d_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_replication_pad2d_rsub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_replication_pad2d_rsub_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = (yindex // 3)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (3*x2) + (27*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (9*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(arg1_1, (3, 1, 10, 10), (100, 100, 10, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 12, 12), (432, 1, 36, 3), torch.float32)
# Topologically Sorted Source Nodes: [sub, mul, x], Original ATen: [aten.rsub, aten.mul, aten.replication_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_replication_pad2d_rsub_0.run(arg0_1, buf0, 1728, grid=grid(1728), stream=stream0)
del arg0_1
# Topologically Sorted Source Nodes: [sub, mul, x, conv2d], Original ATen: [aten.rsub, aten.mul, aten.replication_pad2d, aten.convolution]
buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf1, (4, 3, 3, 3), (27, 1, 9, 3))
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 3, 3, 3), (27, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, mul, x, conv2d], Original ATen: [aten.rsub, aten.mul, aten.replication_pad2d, aten.convolution]
triton_poi_fused_convolution_mul_replication_pad2d_rsub_1.run(buf1, buf2, 12, 9, grid=grid(12, 9), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((3, 1, 10, 10), (100, 100, 10, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class OneDilate(nn.Module):
def __init__(self, kernel_size=10, channels=3, gpu=True):
super(OneDilate, self).__init__()
self.kernel_size = kernel_size
self.channels = channels
gaussian_kernel = torch.ones(1, 1, self.kernel_size, self.kernel_size)
gaussian_kernel = gaussian_kernel.repeat(self.channels, 1, 1, 1)
self.mean = (self.kernel_size - 1) // 2
self.gaussian_filter = nn.Conv2d(in_channels=self.channels,
out_channels=self.channels, kernel_size=self.kernel_size,
groups=self.channels, bias=False)
        if gpu:
            gaussian_kernel = gaussian_kernel  # note: a no-op in the original source; presumably intended to move the kernel to the GPU
self.gaussian_filter.weight.data = gaussian_kernel
self.gaussian_filter.weight.requires_grad = False
def forward(self, x):
        x = F.pad((1 - x) * 0.5, (self.mean, self.mean, self.mean, self.mean), 'replicate')
return self.gaussian_filter(x)
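# Added shape arithmetic: with kernel_size=10, self.mean = (10 - 1) // 2 = 4,
# so a 4x4 input is replicate-padded to 12x12 and the valid 10x10 grouped
# convolution yields 12 - 10 + 1 = 3, matching the (4, 3, 3, 3) output of the
# compiled call() above.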
def get_inputs():
return [torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_replication_pad2d_rsub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 12
x2 = xindex // 36 % 12
x3 = xindex // 432
x4 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 0 * (0 >= -4 + x2) + (-4 + x2) *
(-4 + x2 > 0)) + (0 * (0 >= -4 + x2) + (-4 + x2) * (-4 + x2 > 0)) *
(0 * (0 >= -4 + x2) + (-4 + x2) * (-4 + x2 > 0) < 3)) + 16 * x0 +
48 * x3 + (3 * (3 <= 0 * (0 >= -4 + x1) + (-4 + x1) * (-4 + x1 > 0)
) + (0 * (0 >= -4 + x1) + (-4 + x1) * (-4 + x1 > 0)) * (0 * (0 >= -
4 + x1) + (-4 + x1) * (-4 + x1 > 0) < 3))), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_mul_replication_pad2d_rsub_1(in_ptr0,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 27 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 9 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(arg1_1, (3, 1, 10, 10), (100, 100, 10, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 12, 12), (432, 1, 36, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_replication_pad2d_rsub_0[grid(1728)](arg0_1,
buf0, 1728, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf1, (4, 3, 3, 3), (27, 1, 9, 3))
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 3, 3, 3), (27, 9, 3, 1), torch.float32)
triton_poi_fused_convolution_mul_replication_pad2d_rsub_1[grid(12, 9)](
buf1, buf2, 12, 9, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf1
return buf2,
class OneDilateNew(nn.Module):
def __init__(self, kernel_size=10, channels=3, gpu=True):
super(OneDilateNew, self).__init__()
self.kernel_size = kernel_size
self.channels = channels
gaussian_kernel = torch.ones(1, 1, self.kernel_size, self.kernel_size)
gaussian_kernel = gaussian_kernel.repeat(self.channels, 1, 1, 1)
self.mean = (self.kernel_size - 1) // 2
self.gaussian_filter = nn.Conv2d(in_channels=self.channels,
out_channels=self.channels, kernel_size=self.kernel_size,
groups=self.channels, bias=False)
        if gpu:
            gaussian_kernel = gaussian_kernel  # same no-op as in OneDilate above
self.gaussian_filter.weight.data = gaussian_kernel
self.gaussian_filter.weight.requires_grad = False
def forward(self, input_0):
arg1_1 = self.gaussian_filter.weight
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
| williamyang1991/DeepPS | OneDilate | false | 11,093 | ["MIT"] | 0 | f3eb6ba4b0f2ef068361a4bbbd3d6c2c2f6726b4 | https://github.com/williamyang1991/DeepPS/tree/f3eb6ba4b0f2ef068361a4bbbd3d6c2c2f6726b4 |
BasicDeconv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/fc/cfc2ly6hdnwcliv24f2k6prmw2piqgg6spz6xhzakrvpl323svai.py
# Topologically Sorted Source Nodes: [x, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 49) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
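# Added note: besides applying the bias and ReLU in place, the kernel stores
# the boolean mask relu(x) <= 0 (buf2 below); aten.threshold_backward uses
# this mask to zero gradients where the ReLU was inactive.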
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 784, grid=grid(784), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicDeconv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
use_bn=False):
super(BasicDeconv, self).__init__()
self.use_bn = use_bn
self.tconv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, bias=not self.use_bn)
        self.bn = nn.InstanceNorm2d(out_channels, affine=True) if self.use_bn else None
def forward(self, x):
x = self.tconv(x)
if self.use_bn:
x = self.bn(x)
return F.relu(x, inplace=True)
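# Added shape arithmetic: for ConvTranspose2d with stride=1 and no padding,
# out = (in - 1) * stride + kernel_size = (4 - 1) * 1 + 4 = 7, which matches
# the (4, 4, 7, 7) buffer asserted in the compiled call() above.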
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(784)](buf1,
primals_2, buf2, 784, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class BasicDeconvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
use_bn=False):
super(BasicDeconvNew, self).__init__()
self.use_bn = use_bn
self.tconv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, bias=not self.use_bn)
        self.bn = nn.InstanceNorm2d(out_channels, affine=True) if self.use_bn else None
def forward(self, input_0):
primals_1 = self.tconv.weight
primals_2 = self.tconv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| vghost2008/C-3-Framework | BasicDeconv | false | 11,094 | ["MIT"] | 0 | dc6f1f67e403aff4dbb60f8ed06461c843407501 | https://github.com/vghost2008/C-3-Framework/tree/dc6f1f67e403aff4dbb60f8ed06461c843407501 |
Block_MLP
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
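The kernel above is the statistics pass of a two-kernel LayerNorm: for each 4-element row it computes the mean and rsqrt(var + eps) with a fully unrolled reduction. A minimal eager-mode sketch of the same computation (illustrative only, not part of the original record):

import torch
x = torch.rand(64, 4)                # the (4, 4, 4, 4) input viewed as 64 rows
mean = x.mean(dim=1, keepdim=True)   # tmp8  -> buf0
rstd = torch.rsqrt(x.var(dim=1, unbiased=False, keepdim=True) + 1e-05)  # tmp23 -> buf1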
# kernel path: runs/run_shard_8/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
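The second kernel consumes those statistics and applies the affine parameters. A self-contained sketch checking the pair against F.layer_norm under the shapes used here (the weight/bias values are stand-ins for primals_1/primals_2):

import torch
import torch.nn.functional as F
x = torch.rand(4, 4, 4, 4)
weight, bias = torch.ones(4), torch.zeros(4)   # primals_1 / primals_2 stand-ins
mean = x.mean(-1, keepdim=True)
rstd = torch.rsqrt(x.var(-1, unbiased=False, keepdim=True) + 1e-05)
y = (x - mean) * rstd * weight + bias
assert torch.allclose(y, F.layer_norm(x, (4,), weight, bias, eps=1e-05), atol=1e-05)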
# kernel path: runs/run_shard_8/inductor_cache/w3/cw34ecblrjywdduwwge3aadqjo3z4k2b7rzmrjodmdmh6egawevs.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_1 => add_2, erf, mul_2, mul_3, mul_4
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_3,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_2), kwargs = {})
triton_poi_fused_gelu_2 = async_compile.triton('triton_poi_fused_gelu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
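This is the exact (erf-based) GELU, 0.5 * x * (1 + erf(x / sqrt(2))); the constant 0.7071067811865476 is 1/sqrt(2). A quick eager-mode check (sketch):

import torch
import torch.nn.functional as F
x = torch.randn(1024)
y = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
assert torch.allclose(y, F.gelu(x), atol=1e-06)   # default approximate='none'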
# kernel path: runs/run_shard_8/inductor_cache/b5/cb5kfengtwtso2cktophcv2j56lrz4uz52si4koheb5liewfkvtt.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_5 => add_3
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_3), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
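This last kernel folds two elementwise ops into one in-place pass over buf6: the fc2 bias add (deferred from the bias-free mm that precedes it) and the residual connection. Eager sketch with illustrative values:

import torch
residual = torch.rand(4, 4, 4, 4)   # primals_3
mm_out = torch.rand(4, 4, 4, 4)     # buf6 as produced by the bias-free mm
bias = torch.rand(4)                # primals_7 (fc2 bias)
out = residual + (mm_out + bias)    # bias broadcasts over the last dim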
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.gelu]
triton_poi_fused_gelu_2.run(buf3, buf4, 1024, grid=grid(1024), stream=stream0)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf4, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf6, primals_3, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0), primals_6, primals_4, )
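Note that reinterpret_tensor, used throughout this wrapper, is a zero-copy restride rather than a copy: the (4, 4, 4, 4) activations are viewed as (64, 4) or (64, 16) matrices so the MLP's two linears run as plain GEMMs. The eager equivalent is just a view (sketch):

import torch
t = torch.rand(4, 4, 4, 4)
flat = t.view(64, 4)    # same storage; strides (4, 1), as in the call above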
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
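A worked example of the function above: with drop_prob=0.25 (and training=True, otherwise it is a no-op), each sample's whole residual path is zeroed with probability 0.25 and survivors are scaled by 1/0.75 so the expected value is unchanged. Sketch:

import torch
torch.manual_seed(0)
x = torch.ones(4, 3)
keep_prob = 0.75
mask = (keep_prob + torch.rand(4, 1)).floor_()   # 1 with prob 0.75, else 0, per sample
out = x / keep_prob * mask                       # E[out] == x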
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Block_MLP(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0,
        act_layer=nn.GELU, norm_layer=nn.LayerNorm, layerscale=0.0,
        train_scale=True):
super().__init__()
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.layerscale = layerscale
if layerscale > 0.0:
if train_scale:
self.gamma = nn.Parameter(layerscale * torch.ones(dim),
requires_grad=True)
else:
self.gamma = nn.Parameter(layerscale * torch.ones(dim),
requires_grad=False)
def forward(self, x):
if self.layerscale > 0.0:
x = x + self.drop_path(self.gamma * self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
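Putting the helpers together, a usage sketch (assumes the definitions above are in scope; num_heads is accepted for interface compatibility but unused in this MLP-only block):

block = Block_MLP(*get_init_inputs()[0], **get_init_inputs()[1])
out = block(*get_inputs())
print(out.shape)   # torch.Size([4, 4, 4, 4]) -- the residual keeps the input shape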
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
del primals_5
        buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1),
            torch.float32)
triton_poi_fused_gelu_2[grid(1024)](buf3, buf4, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 16), (16, 1), 0),
reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_3[grid(256)](buf6, primals_3, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
    return (buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
        buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0), primals_6,
        primals_4)
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Block_MLPNew(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
        qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0,
        act_layer=nn.GELU, norm_layer=nn.LayerNorm, layerscale=0.0,
        train_scale=True):
super().__init__()
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
self.layerscale = layerscale
if layerscale > 0.0:
if train_scale:
self.gamma = nn.Parameter(layerscale * torch.ones(dim),
requires_grad=True)
else:
self.gamma = nn.Parameter(layerscale * torch.ones(dim),
requires_grad=False)
def forward(self, input_0):
primals_1 = self.norm2.weight
primals_2 = self.norm2.bias
primals_4 = self.mlp.fc1.weight
primals_5 = self.mlp.fc1.bias
primals_6 = self.mlp.fc2.weight
primals_7 = self.mlp.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| yifanc96/yifanc-DL | Block_MLP | false | 11,095 | ["MIT"] | 0 | 25d56cec776fb151c8f6bcbd997bca94f07f3597 | https://github.com/yifanc96/yifanc-DL/tree/25d56cec776fb151c8f6bcbd997bca94f07f3597 |
ParityPonderGRU
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/7e/c7edgnsiuilw7uzwau7radvkvvtmowm7d7uh56mczbhieiykfrnx.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.new_zeros]
# Source node to ATen node mapping:
# h => full_default
# Graph fragment:
# %full_default : [num_users=3] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_new_zeros_0 = async_compile.triton('triton_poi_fused_new_zeros_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/zw/czwxbu6nuxhoujusu3krfetmqzx7rxloioah6gicy4ie2wmv6tqi.py
# Topologically Sorted Source Nodes: [stack_3], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack_3 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_8, %primals_8, %primals_8, %primals_8, %primals_6, %primals_6, %primals_6],), kwargs = {})
triton_poi_fused_stack_1 = async_compile.triton('triton_poi_fused_stack_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (x0), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 5, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr1 + (x0), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 6, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr1 + (x0), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp31 = tmp0 >= tmp27
tmp32 = tl.full([1], 7, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tl.load(in_ptr1 + (x0), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tl.where(tmp29, tmp30, tmp34)
tmp36 = tl.where(tmp24, tmp25, tmp35)
tmp37 = tl.where(tmp19, tmp20, tmp36)
tmp38 = tl.where(tmp14, tmp15, tmp37)
tmp39 = tl.where(tmp9, tmp10, tmp38)
tmp40 = tl.where(tmp4, tmp5, tmp39)
tl.store(out_ptr0 + (x2), tmp40, xmask)
''', device_str='cuda')
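This stack kernel (and triton_poi_fused_stack_2 below it for the biases) batches the unrolled recurrence's small per-step linears: per the cat in the graph fragment, primals_8 is repeated four times and primals_6 three times, so seven per-step applications run as one batched op. A generic sketch of the trick; the shapes and the roles of primals_8/primals_6 are assumptions, since those parameters are defined above this excerpt:

import torch
w = torch.rand(1, 4)              # one per-step Linear weight (illustrative)
W = torch.cat([w] * 7, dim=0)     # (7, 4): one copy per unrolled step
h = torch.rand(7, 4)              # the matching per-step inputs
out = (W * h).sum(-1)             # seven matvecs collapsed into one batched op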
# kernel path: runs/run_shard_8/inductor_cache/hq/chq2ihz43k5f3canufmvrzz3vy4seaa7ohwwzmv2o6szsqgwtn4r.py
# Topologically Sorted Source Nodes: [stack_4], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack_4 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_9, %primals_9, %primals_9, %primals_9, %primals_7, %primals_7, %primals_7],), kwargs = {})
triton_poi_fused_stack_2 = async_compile.triton('triton_poi_fused_stack_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 7
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp5 = tl.load(in_ptr0 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp23 = tl.load(in_ptr1 + (0))
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tmp0 >= tmp3
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tmp0 >= tmp8
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tmp0 >= tmp12
tmp16 = tl.full([1], 4, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tmp0 >= tmp16
tmp20 = tl.full([1], 5, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp25 = tmp0 >= tmp20
tmp26 = tl.full([1], 6, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp0 >= tmp26
tmp30 = tl.full([1], 7, tl.int64)
tmp31 = tmp0 < tmp30
tmp32 = tl.where(tmp28, tmp24, tmp24)
tmp33 = tl.where(tmp22, tmp24, tmp32)
tmp34 = tl.where(tmp18, tmp6, tmp33)
tmp35 = tl.where(tmp14, tmp6, tmp34)
tmp36 = tl.where(tmp10, tmp6, tmp35)
tmp37 = tl.where(tmp4, tmp6, tmp36)
tl.store(out_ptr0 + (x0), tmp37, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/g2/cg23xreycdv6fxtkdfop75dzuhabmjs3xhrvq22ytdld2qihmoyg.py
# Topologically Sorted Source Nodes: [stack_2], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_2, %getitem_4, %getitem_6, %getitem, %getitem_2, %getitem_4],), kwargs = {})
triton_poi_fused_stack_3 = async_compile.triton('triton_poi_fused_stack_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp19 & xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (x0 + (4*((-16) + x1))), tmp24 & xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 24, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr1 + (x0 + (4*((-20) + x1))), tmp29 & xmask, other=0.0)
tmp31 = tmp0 >= tmp27
tmp32 = tl.full([1], 28, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tl.load(in_ptr2 + (x0 + (4*((-24) + x1))), tmp31 & xmask, other=0.0)
tmp35 = tl.where(tmp29, tmp30, tmp34)
tmp36 = tl.where(tmp24, tmp25, tmp35)
tmp37 = tl.where(tmp19, tmp20, tmp36)
tmp38 = tl.where(tmp14, tmp15, tmp37)
tmp39 = tl.where(tmp9, tmp10, tmp38)
tmp40 = tl.where(tmp4, tmp5, tmp39)
tl.store(out_ptr0 + (x2), tmp40, xmask)
''', device_str='cuda')
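This kernel concatenates the per-step hidden states (getitem through getitem_6, with the first three repeated) into a single (28, 4) tensor, again so the following per-step ops are batched. Eager sketch:

import torch
hs = [torch.rand(4, 4) for _ in range(4)]   # hidden states from 4 unrolled steps
stacked = torch.cat(hs + hs[:3], dim=0)     # (28, 4), matching cat_2 above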
# kernel path: runs/run_shard_8/inductor_cache/oo/coo3437vo4xsrwedl7lcrwfswym3k6uq6mzbtutpnydbupg5rgob.py
# Topologically Sorted Source Nodes: [p_m, sigmoid_2, sub_8, bernoulli_2, sigmoid_1, sub_4, bernoulli_1, sigmoid, un_halted_prob_1, p_n_1, un_halted_prob_2, p_n_2, p_n_3, bernoulli, halted_1, sub_2, mul_3, mul_4, p_m_1, halt_1, sub_6, mul_10, mul_11, p_m_2, halted_2, sub_9, halt_2, sub_10, mul_17, mul_18, p_m_3, halted_3, mul_20, mul_13, mul_6, y_m_1, mul_12, y_m_2, mul_19, y_m_3, lambda_n_3, bernoulli_3, sub_13, halt_3, sub_14, mul_24, mul_25, p_m_4, mul_26, mul_27, y_m_4], Original ATen: [aten.new_zeros, aten.sigmoid, aten.rsub, aten.bernoulli, aten.mul, aten.add, aten.new_ones]
# Source node to ATen node mapping:
# bernoulli => lt_2
# bernoulli_1 => convert_element_type_1, lt_1
# bernoulli_2 => convert_element_type, lt
# bernoulli_3 => convert_element_type_3, lt_3
# halt_1 => mul_10
# halt_2 => mul_13
# halt_3 => mul_23
# halted_1 => convert_element_type_2
# halted_2 => add_4
# halted_3 => add_6
# lambda_n_3 => full_default_6
# mul_10 => mul_11
# mul_11 => mul_12
# mul_12 => mul_19
# mul_13 => mul_17
# mul_17 => mul_14
# mul_18 => mul_15
# mul_19 => mul_20
# mul_20 => mul_16
# mul_24 => mul_24
# mul_25 => mul_25
# mul_26 => mul_26
# mul_27 => mul_27
# mul_3 => mul_7
# mul_4 => mul_8
# mul_6 => mul_18
# p_m => full_default_3
# p_m_1 => add_1
# p_m_2 => add_3
# p_m_3 => add_5
# p_m_4 => add_10
# p_n_1 => mul_2
# p_n_2 => mul_4
# p_n_3 => mul_5
# sigmoid => sigmoid_2
# sigmoid_1 => sigmoid_1
# sigmoid_2 => sigmoid
# sub_10 => sub_10
# sub_13 => sub_13
# sub_14 => sub_14
# sub_2 => sub_4
# sub_4 => sub_2
# sub_6 => sub_7
# sub_8 => sub_1
# sub_9 => sub_9
# un_halted_prob_1 => sub_3
# un_halted_prob_2 => mul_3
# y_m_1 => add_7
# y_m_2 => add_8
# y_m_3 => add_9
# y_m_4 => add_11
# Graph fragment:
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_14,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select), kwargs = {})
# %lt : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand, %select), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt, torch.float32), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_13,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_1), kwargs = {})
# %lt_1 : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand_1, %select_1), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt_1, torch.float32), kwargs = {})
# %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_12,), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_2), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %select_1), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %sub_2), kwargs = {})
# %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %select), kwargs = {})
# %mul_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sub_1), kwargs = {})
# %lt_2 : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand_2, %select_2), kwargs = {})
# %convert_element_type_2 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt_2, torch.float32), kwargs = {})
# %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type_2), kwargs = {})
# %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default_3, %sub_4), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %convert_element_type_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %mul_8), kwargs = {})
# %mul_10 : [num_users=4] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %sub_4), kwargs = {})
# %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_10), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %sub_7), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %mul_10), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, %mul_12), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, %mul_10), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add_4), kwargs = {})
# %mul_13 : [num_users=4] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, %sub_9), kwargs = {})
# %sub_10 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_13), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %sub_10), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %mul_13), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_14, %mul_15), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_13), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, %mul_13), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, %mul_10), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, %convert_element_type_2), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %mul_18), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, %sub_7), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, %mul_17), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, %sub_10), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_20, %mul_16), kwargs = {})
# %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %lt_3 : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand_3, %full_default_6), kwargs = {})
# %convert_element_type_3 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt_3, torch.float32), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add_6), kwargs = {})
# %mul_23 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_3, %sub_13), kwargs = {})
# %sub_14 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_23), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, %sub_14), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %mul_23), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_24, %mul_25), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_9, %sub_14), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_6, %mul_23), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_26, %mul_27), kwargs = {})
triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4 = async_compile.triton('triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*i1', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*i1', 13: '*i1', 14: '*i1', 15: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 18, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (4))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp5 = tl.load(in_ptr2 + (16 + x0), xmask)
tmp8 = tl.load(in_ptr1 + (5))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (20 + x0), xmask)
tmp13 = tl.load(in_ptr1 + (6))
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp15 = tl.load(in_ptr2 + (24 + x0), xmask)
tmp18 = tl.load(in_ptr3 + (x0), xmask)
tmp20 = tl.load(in_ptr4 + (x0), xmask)
tmp22 = tl.load(in_ptr5 + (x0), xmask)
tmp59 = tl.load(in_ptr1 + (0))
tmp60 = tl.broadcast_to(tmp59, [XBLOCK])
tmp61 = tl.load(in_ptr2 + (x0), xmask)
tmp66 = tl.load(in_ptr1 + (1))
tmp67 = tl.broadcast_to(tmp66, [XBLOCK])
tmp68 = tl.load(in_ptr2 + (4 + x0), xmask)
tmp73 = tl.load(in_ptr1 + (2))
tmp74 = tl.broadcast_to(tmp73, [XBLOCK])
tmp75 = tl.load(in_ptr2 + (8 + x0), xmask)
tmp80 = tl.load(in_ptr1 + (3))
tmp81 = tl.broadcast_to(tmp80, [XBLOCK])
tmp82 = tl.load(in_ptr2 + (12 + x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 < tmp1
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp11 = tmp9 + tmp10
tmp12 = tl.sigmoid(tmp11)
tmp16 = tmp14 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp19 = tmp18 < tmp17
tmp21 = tmp20 < tmp12
tmp23 = tmp22 < tmp7
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp1 - tmp24
tmp26 = 0.0
tmp27 = tmp26 * tmp25
tmp28 = tmp7 * tmp24
tmp29 = tmp27 + tmp28
tmp30 = tmp21.to(tl.float32)
tmp31 = tmp30 * tmp25
tmp32 = tmp1 - tmp31
tmp33 = tmp29 * tmp32
tmp34 = tmp1 - tmp7
tmp35 = tmp34 * tmp12
tmp36 = tmp35 * tmp31
tmp37 = tmp33 + tmp36
tmp38 = tmp19.to(tl.float32)
tmp39 = tmp24 + tmp31
tmp40 = tmp1 - tmp39
tmp41 = tmp38 * tmp40
tmp42 = tmp1 - tmp41
tmp43 = tmp37 * tmp42
tmp44 = tmp1 - tmp12
tmp45 = tmp34 * tmp44
tmp46 = tmp45 * tmp17
tmp47 = tmp46 * tmp41
tmp48 = tmp43 + tmp47
tmp49 = tmp2.to(tl.float32)
tmp50 = tmp39 + tmp41
tmp51 = tmp1 - tmp50
tmp52 = tmp49 * tmp51
tmp53 = tmp1 - tmp52
tmp54 = tmp48 * tmp53
tmp55 = tmp1 - tmp17
tmp56 = tmp45 * tmp55
tmp57 = tmp56 * tmp52
tmp58 = tmp54 + tmp57
tmp62 = tmp60 + tmp61
tmp63 = tmp62 * tmp24
tmp64 = tmp27 + tmp63
tmp65 = tmp64 * tmp32
tmp69 = tmp67 + tmp68
tmp70 = tmp69 * tmp31
tmp71 = tmp65 + tmp70
tmp72 = tmp71 * tmp42
tmp76 = tmp74 + tmp75
tmp77 = tmp76 * tmp41
tmp78 = tmp72 + tmp77
tmp79 = tmp78 * tmp53
tmp83 = tmp81 + tmp82
tmp84 = tmp83 * tmp52
tmp85 = tmp79 + tmp84
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp7, xmask)
tl.store(out_ptr2 + (x0), tmp12, xmask)
tl.store(out_ptr3 + (x0), tmp17, xmask)
tl.store(out_ptr4 + (x0), tmp19, xmask)
tl.store(out_ptr5 + (x0), tmp21, xmask)
tl.store(out_ptr6 + (x0), tmp23, xmask)
tl.store(in_out_ptr0 + (x0), tmp58, xmask)
tl.store(in_out_ptr1 + (x0), tmp85, xmask)
''', device_str='cuda')
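The kernel above fuses the PonderNet-style halting arithmetic for the entire unrolled loop: at step n the model halts with probability lambda_n = sigmoid(logit_n); p_n = lambda_n * prod_{k<n}(1 - lambda_k) is the probability of halting exactly at step n; and p_m/y_m track the values at the Bernoulli-sampled halting step, with the final step forced to halt (lambda = 1). A reference sketch of the same recurrence in eager PyTorch (illustrative; y_m is updated the same way using the per-step predictions):

import torch
torch.manual_seed(0)
lambdas = torch.sigmoid(torch.randn(3, 4))   # per-step halting probs, batch of 4
un_halted = torch.ones(4)                    # prob of not having halted yet
halted = torch.zeros(4)                      # sampled halting indicator
p_m = torch.zeros(4)                         # p at the sampled halting step
for n in range(3):
    p_n = un_halted * lambdas[n]             # P(halt exactly at this step)
    un_halted = un_halted * (1 - lambdas[n])
    halt = torch.bernoulli(lambdas[n]) * (1 - halted)
    p_m = p_m * (1 - halt) + p_n * halt
    halted = halted + halt
halt = 1 - halted                            # final step: lambda = 1, force halt
p_m = p_m * (1 - halt) + un_halted * halt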
# kernel path: runs/run_shard_8/inductor_cache/jy/cjys3iopkrrusczes7dimlrpecwecndhklisdis5ynngdb4dpi3f.py
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack => cat_3
# Graph fragment:
# %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_2, %mul_2, %mul_4, %mul_5],), kwargs = {})
triton_poi_fused_stack_5 = async_compile.triton('triton_poi_fused_stack_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = 1.0
tmp12 = tmp11 - tmp10
tmp13 = tl.load(in_ptr1 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 12, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr0 + ((-8) + x0), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp11 - tmp21
tmp23 = tl.load(in_ptr1 + ((-8) + x0), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp11 - tmp23
tmp25 = tmp22 * tmp24
tmp26 = tl.load(in_ptr2 + ((-8) + x0), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tmp25 * tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tmp0 >= tmp18
tmp31 = tl.full([1], 16, tl.int64)
tmp32 = tmp0 < tmp31
tmp33 = tl.load(in_ptr0 + ((-12) + x0), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tmp11 - tmp33
tmp35 = tl.load(in_ptr1 + ((-12) + x0), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp11 - tmp35
tmp37 = tmp34 * tmp36
tmp38 = tl.load(in_ptr2 + ((-12) + x0), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tmp11 - tmp38
tmp40 = tmp37 * tmp39
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp30, tmp40, tmp41)
tmp43 = tl.where(tmp20, tmp29, tmp42)
tmp44 = tl.where(tmp9, tmp16, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + (x0), tmp45, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/o4/co4faoo6vn5p4s7le2cjisym3ezxxx6wg265nankhpf5szhdz3rv.py
# Topologically Sorted Source Nodes: [stack_1], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack_1 => cat_4
# Graph fragment:
# %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_5, %select_4, %select_3, %select_6],), kwargs = {})
triton_poi_fused_stack_6 = async_compile.triton('triton_poi_fused_stack_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp5 = tl.load(in_ptr0 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (1))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp25 = tl.load(in_ptr0 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp34 = tl.load(in_ptr0 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp17 = tl.load(in_ptr1 + (4 + ((-4) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp14, tmp18, tmp19)
tmp21 = tmp0 >= tmp12
tmp22 = tl.full([1], 12, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp27 = tl.load(in_ptr1 + (8 + ((-8) + x0)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp0 >= tmp22
tmp32 = tl.full([1], 16, tl.int64)
tmp33 = tmp0 < tmp32
tmp36 = tl.load(in_ptr1 + (12 + ((-12) + x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp31, tmp37, tmp38)
tmp40 = tl.where(tmp24, tmp30, tmp39)
tmp41 = tl.where(tmp14, tmp20, tmp40)
tmp42 = tl.where(tmp4, tmp10, tmp41)
tl.store(out_ptr0 + (x0), tmp42, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12, ), (1, ))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.new_zeros]
stream0 = get_raw_stream(0)
triton_poi_fused_new_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf2)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten._thnn_fused_gru_cell]
buf3 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf2, buf0, primals_4, primals_5)
buf4 = buf3[0]
buf5 = buf3[1]
del buf3
buf6 = empty_strided_cuda((7, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [stack_3], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(primals_8, primals_6, buf6, 28, grid=grid(28), stream=stream0)
del primals_6
del primals_8
buf7 = empty_strided_cuda((7, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [stack_4], Original ATen: [aten.stack]
triton_poi_fused_stack_2.run(primals_9, primals_7, buf7, 7, grid=grid(7), stream=stream0)
del primals_7
del primals_9
buf8 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.mm]
extern_kernels.mm(buf4, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf8)
# Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten._thnn_fused_gru_cell]
buf9 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf8, buf4, primals_4, primals_5)
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [ret_2], Original ATen: [aten.mm]
extern_kernels.mm(buf10, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf12)
# Topologically Sorted Source Nodes: [ret_2], Original ATen: [aten._thnn_fused_gru_cell]
buf13 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf12, buf10, primals_4, primals_5)
buf14 = buf13[0]
buf15 = buf13[1]
del buf13
buf16 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [ret_3], Original ATen: [aten.mm]
extern_kernels.mm(buf14, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf16)
# Topologically Sorted Source Nodes: [ret_3], Original ATen: [aten._thnn_fused_gru_cell]
buf17 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf16, buf14, primals_4, primals_5)
del buf1
del buf16
del primals_4
del primals_5
buf18 = buf17[0]
buf19 = buf17[1]
del buf17
buf20 = empty_strided_cuda((28, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [stack_2], Original ATen: [aten.stack]
triton_poi_fused_stack_3.run(buf4, buf10, buf14, buf18, buf20, 112, grid=grid(112), stream=stream0)
buf21 = empty_strided_cuda((7, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [baddbmm], Original ATen: [aten.baddbmm]
extern_kernels.bmm(reinterpret_tensor(buf20, (7, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (7, 4, 1), (4, 1, 4), 0), out=buf21)
# Topologically Sorted Source Nodes: [bernoulli_2], Original ATen: [aten.bernoulli]
buf23 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf24 = buf23
del buf23
# Topologically Sorted Source Nodes: [bernoulli_1], Original ATen: [aten.bernoulli]
buf27 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf28 = buf27
del buf27
# Topologically Sorted Source Nodes: [bernoulli], Original ATen: [aten.bernoulli]
buf31 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf32 = buf31
del buf31
# Topologically Sorted Source Nodes: [bernoulli_3], Original ATen: [aten.bernoulli]
buf36 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf37 = buf36
del buf36
buf38 = empty_strided_cuda((4, ), (1, ), torch.bool)
buf30 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf22 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf25 = empty_strided_cuda((4, ), (1, ), torch.bool)
buf29 = empty_strided_cuda((4, ), (1, ), torch.bool)
buf33 = empty_strided_cuda((4, ), (1, ), torch.bool)
buf34 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf39 = buf34; del buf34 # reuse
buf35 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf40 = buf35; del buf35 # reuse
# Topologically Sorted Source Nodes: [p_m, sigmoid_2, sub_8, bernoulli_2, sigmoid_1, sub_4, bernoulli_1, sigmoid, un_halted_prob_1, p_n_1, un_halted_prob_2, p_n_2, p_n_3, bernoulli, halted_1, sub_2, mul_3, mul_4, p_m_1, halt_1, sub_6, mul_10, mul_11, p_m_2, halted_2, sub_9, halt_2, sub_10, mul_17, mul_18, p_m_3, halted_3, mul_20, mul_13, mul_6, y_m_1, mul_12, y_m_2, mul_19, y_m_3, lambda_n_3, bernoulli_3, sub_13, halt_3, sub_14, mul_24, mul_25, p_m_4, mul_26, mul_27, y_m_4], Original ATen: [aten.new_zeros, aten.sigmoid, aten.rsub, aten.bernoulli, aten.mul, aten.add, aten.new_ones]
triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4.run(buf39, buf40, buf37, buf7, buf21, buf24, buf28, buf32, buf38, buf30, buf26, buf22, buf25, buf29, buf33, 4, grid=grid(4), stream=stream0)
del buf24
del buf28
del buf32
del buf37
buf41 = reinterpret_tensor(buf18, (16, ), (1, ), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
triton_poi_fused_stack_5.run(buf30, buf26, buf22, buf41, 16, grid=grid(16), stream=stream0)
buf42 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [stack_1], Original ATen: [aten.stack]
triton_poi_fused_stack_6.run(buf7, buf21, buf42, 16, grid=grid(16), stream=stream0)
del buf21
del buf7
return (reinterpret_tensor(buf41, (4, 4), (4, 1), 0), reinterpret_tensor(buf42, (4, 4), (4, 1), 0), buf39, buf40, primals_1, buf0, buf4, buf5, buf10, buf11, buf14, buf15, buf19, buf22, buf25, buf26, buf29, buf30, buf33, buf38, reinterpret_tensor(buf6, (7, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf20, (7, 4, 4), (16, 1, 4), 0), primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
from torch import nn
from typing import Tuple
import torch.utils.data
import torch.nn.functional
import torch.autograd
class ParityPonderGRU(Module):
"""
## PonderNet with GRU for Parity Task
This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html)
as the step function.
This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`.
Each element of the vector is either `0`, `1` or `-1` and the output is the parity
- a binary value that is true if the number of `1`s is odd and false otherwise.
The prediction of the model is the log probability of the parity being $1$.
"""
def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'):
"""
* `n_elems` is the number of elements in the input vector
* `n_hidden` is the state vector size of the GRU
* `max_steps` is the maximum number of steps $N$
"""
super().__init__()
self.max_steps = max_steps
self.n_hidden = n_hidden
self.gru = nn.GRUCell(n_elems, n_hidden)
self.output_layer = nn.Linear(n_hidden, 1)
self.lambda_layer = nn.Linear(n_hidden, 1)
self.lambda_prob = nn.Sigmoid()
self.is_halt = False
def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor,
torch.Tensor, torch.Tensor]:
"""
* `x` is the input of shape `[batch_size, n_elems]`
This outputs a tuple of four tensors:
1. $p_1 \\dots p_N$ in a tensor of shape `[N, batch_size]`
2. $\\hat{y}_1 \\dots \\hat{y}_N$ in a tensor of shape `[N, batch_size]` - the log probabilities of the parity being $1$
3. $p_m$ of shape `[batch_size]`
4. $\\hat{y}_m$ of shape `[batch_size]` where the computation was halted at step $m$
"""
batch_size = x.shape[0]
h = x.new_zeros((x.shape[0], self.n_hidden))
h = self.gru(x, h)
p = []
y = []
un_halted_prob = h.new_ones((batch_size,))
halted = h.new_zeros((batch_size,))
p_m = h.new_zeros((batch_size,))
y_m = h.new_zeros((batch_size,))
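        # At step n the halting probability is p_n = lambda_n * prod_{k<n}(1 - lambda_k):
        # the probability of not having halted in steps 1..n-1 times the probability
        # of halting now; `un_halted_prob` tracks the running product.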
for n in range(1, self.max_steps + 1):
if n == self.max_steps:
lambda_n = h.new_ones(h.shape[0])
else:
lambda_n = self.lambda_prob(self.lambda_layer(h))[:, 0]
y_n = self.output_layer(h)[:, 0]
p_n = un_halted_prob * lambda_n
un_halted_prob = un_halted_prob * (1 - lambda_n)
halt = torch.bernoulli(lambda_n) * (1 - halted)
p.append(p_n)
y.append(y_n)
p_m = p_m * (1 - halt) + p_n * halt
y_m = y_m * (1 - halt) + y_n * halt
halted = halted + halt
h = self.gru(x, h)
if self.is_halt and halted.sum() == batch_size:
break
return torch.stack(p), torch.stack(y), p_m, y_m
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_elems': 4, 'n_hidden': 4, 'max_steps': 4}]
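# Illustrative usage sketch (not part of the original source; shapes match
# get_inputs/get_init_inputs above):
if __name__ == '__main__':
    model = ParityPonderGRU(n_elems=4, n_hidden=4, max_steps=4)
    p, y_hat, p_m, y_m = model(torch.rand([4, 4]))
    # p and y_hat are [max_steps, batch_size]; p_m and y_m are [batch_size]
    print(p.shape, y_hat.shape, p_m.shape, y_m.shape)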
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + x0, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + x0, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + x0, tmp19 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 5, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr1 + x0, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 6, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr1 + x0, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tmp0 >= tmp27
tl.full([1], 7, tl.int64)
tmp34 = tl.load(in_ptr1 + x0, tmp31 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp35 = tl.where(tmp29, tmp30, tmp34)
tmp36 = tl.where(tmp24, tmp25, tmp35)
tmp37 = tl.where(tmp19, tmp20, tmp36)
tmp38 = tl.where(tmp14, tmp15, tmp37)
tmp39 = tl.where(tmp9, tmp10, tmp38)
tmp40 = tl.where(tmp4, tmp5, tmp39)
tl.store(out_ptr0 + x2, tmp40, xmask)
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 7
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp5 = tl.load(in_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp23 = tl.load(in_ptr1 + 0)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tmp0 >= tmp3
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tmp0 >= tmp8
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tmp0 >= tmp12
tmp16 = tl.full([1], 4, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tmp0 >= tmp16
tmp20 = tl.full([1], 5, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp25 = tmp0 >= tmp20
tmp26 = tl.full([1], 6, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tmp25 & tmp27
tl.full([1], 7, tl.int64)
tmp32 = tl.where(tmp28, tmp24, tmp24)
tmp33 = tl.where(tmp22, tmp24, tmp32)
tmp34 = tl.where(tmp18, tmp6, tmp33)
tmp35 = tl.where(tmp14, tmp6, tmp34)
tmp36 = tl.where(tmp10, tmp6, tmp35)
tmp37 = tl.where(tmp4, tmp6, tmp36)
tl.store(out_ptr0 + x0, tmp37, xmask)
@triton.jit
def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp19 & xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (x0 + 4 * (-16 + x1)), tmp24 & xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 24, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr1 + (x0 + 4 * (-20 + x1)), tmp29 & xmask, other=0.0)
tmp31 = tmp0 >= tmp27
tl.full([1], 28, tl.int64)
tmp34 = tl.load(in_ptr2 + (x0 + 4 * (-24 + x1)), tmp31 & xmask, other=0.0)
tmp35 = tl.where(tmp29, tmp30, tmp34)
tmp36 = tl.where(tmp24, tmp25, tmp35)
tmp37 = tl.where(tmp19, tmp20, tmp36)
tmp38 = tl.where(tmp14, tmp15, tmp37)
tmp39 = tl.where(tmp9, tmp10, tmp38)
tmp40 = tl.where(tmp4, tmp5, tmp39)
tl.store(out_ptr0 + x2, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
out_ptr6, xnumel, XBLOCK: tl.constexpr):
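    # A sketch of what Inductor fused here: sigmoid the three lambda logits
    # (in_ptr1 indices 4-6 plus the bmm results in in_ptr2), draw Bernoulli
    # halting decisions by comparing uniform randoms against them, and
    # accumulate the halted-step probability p_m (in_out_ptr0) and prediction
    # y_m (in_out_ptr1) across the four unrolled steps; step 4 uses lambda=1.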
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + 4)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp5 = tl.load(in_ptr2 + (16 + x0), xmask)
tmp8 = tl.load(in_ptr1 + 5)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (20 + x0), xmask)
tmp13 = tl.load(in_ptr1 + 6)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp15 = tl.load(in_ptr2 + (24 + x0), xmask)
tmp18 = tl.load(in_ptr3 + x0, xmask)
tmp20 = tl.load(in_ptr4 + x0, xmask)
tmp22 = tl.load(in_ptr5 + x0, xmask)
tmp59 = tl.load(in_ptr1 + 0)
tmp60 = tl.broadcast_to(tmp59, [XBLOCK])
tmp61 = tl.load(in_ptr2 + x0, xmask)
tmp66 = tl.load(in_ptr1 + 1)
tmp67 = tl.broadcast_to(tmp66, [XBLOCK])
tmp68 = tl.load(in_ptr2 + (4 + x0), xmask)
tmp73 = tl.load(in_ptr1 + 2)
tmp74 = tl.broadcast_to(tmp73, [XBLOCK])
tmp75 = tl.load(in_ptr2 + (8 + x0), xmask)
tmp80 = tl.load(in_ptr1 + 3)
tmp81 = tl.broadcast_to(tmp80, [XBLOCK])
tmp82 = tl.load(in_ptr2 + (12 + x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 < tmp1
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp11 = tmp9 + tmp10
tmp12 = tl.sigmoid(tmp11)
tmp16 = tmp14 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp19 = tmp18 < tmp17
tmp21 = tmp20 < tmp12
tmp23 = tmp22 < tmp7
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp1 - tmp24
tmp26 = 0.0
tmp27 = tmp26 * tmp25
tmp28 = tmp7 * tmp24
tmp29 = tmp27 + tmp28
tmp30 = tmp21.to(tl.float32)
tmp31 = tmp30 * tmp25
tmp32 = tmp1 - tmp31
tmp33 = tmp29 * tmp32
tmp34 = tmp1 - tmp7
tmp35 = tmp34 * tmp12
tmp36 = tmp35 * tmp31
tmp37 = tmp33 + tmp36
tmp38 = tmp19.to(tl.float32)
tmp39 = tmp24 + tmp31
tmp40 = tmp1 - tmp39
tmp41 = tmp38 * tmp40
tmp42 = tmp1 - tmp41
tmp43 = tmp37 * tmp42
tmp44 = tmp1 - tmp12
tmp45 = tmp34 * tmp44
tmp46 = tmp45 * tmp17
tmp47 = tmp46 * tmp41
tmp48 = tmp43 + tmp47
tmp49 = tmp2.to(tl.float32)
tmp50 = tmp39 + tmp41
tmp51 = tmp1 - tmp50
tmp52 = tmp49 * tmp51
tmp53 = tmp1 - tmp52
tmp54 = tmp48 * tmp53
tmp55 = tmp1 - tmp17
tmp56 = tmp45 * tmp55
tmp57 = tmp56 * tmp52
tmp58 = tmp54 + tmp57
tmp62 = tmp60 + tmp61
tmp63 = tmp62 * tmp24
tmp64 = tmp27 + tmp63
tmp65 = tmp64 * tmp32
tmp69 = tmp67 + tmp68
tmp70 = tmp69 * tmp31
tmp71 = tmp65 + tmp70
tmp72 = tmp71 * tmp42
tmp76 = tmp74 + tmp75
tmp77 = tmp76 * tmp41
tmp78 = tmp72 + tmp77
tmp79 = tmp78 * tmp53
tmp83 = tmp81 + tmp82
tmp84 = tmp83 * tmp52
tmp85 = tmp79 + tmp84
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
tl.store(out_ptr2 + x0, tmp12, xmask)
tl.store(out_ptr3 + x0, tmp17, xmask)
tl.store(out_ptr4 + x0, tmp19, xmask)
tl.store(out_ptr5 + x0, tmp21, xmask)
tl.store(out_ptr6 + x0, tmp23, xmask)
tl.store(in_out_ptr0 + x0, tmp58, xmask)
tl.store(in_out_ptr1 + x0, tmp85, xmask)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = 1.0
tmp12 = tmp11 - tmp10
tmp13 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 12, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr0 + (-8 + x0), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp11 - tmp21
tmp23 = tl.load(in_ptr1 + (-8 + x0), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tmp11 - tmp23
tmp25 = tmp22 * tmp24
tmp26 = tl.load(in_ptr2 + (-8 + x0), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp27 = tmp25 * tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tmp0 >= tmp18
tl.full([1], 16, tl.int64)
tmp33 = tl.load(in_ptr0 + (-12 + x0), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp11 - tmp33
tmp35 = tl.load(in_ptr1 + (-12 + x0), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp11 - tmp35
tmp37 = tmp34 * tmp36
tmp38 = tl.load(in_ptr2 + (-12 + x0), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp39 = tmp11 - tmp38
tmp40 = tmp37 * tmp39
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp30, tmp40, tmp41)
tmp43 = tl.where(tmp20, tmp29, tmp42)
tmp44 = tl.where(tmp9, tmp16, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + x0, tmp45, xmask)
@triton.jit
def triton_poi_fused_stack_6(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp5 = tl.load(in_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp15 = tl.load(in_ptr0 + 1)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp25 = tl.load(in_ptr0 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp34 = tl.load(in_ptr0 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp17 = tl.load(in_ptr1 + (4 + (-4 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp14, tmp18, tmp19)
tmp21 = tmp0 >= tmp12
tmp22 = tl.full([1], 12, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp27 = tl.load(in_ptr1 + (8 + (-8 + x0)), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp0 >= tmp22
tl.full([1], 16, tl.int64)
tmp36 = tl.load(in_ptr1 + (12 + (-12 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp31, tmp37, tmp38)
tmp40 = tl.where(tmp24, tmp30, tmp39)
tmp41 = tl.where(tmp14, tmp20, tmp40)
tmp42 = tl.where(tmp4, tmp10, tmp41)
tl.store(out_ptr0 + x0, tmp42, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12),
(1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf2)
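        # The four _thnn_fused_gru_cell calls below are the GRU recurrence
        # unrolled across max_steps=4; buf1 (the input projection) is computed
        # once and shared, only the hidden-state projection is recomputed.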
buf3 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf2, buf0,
primals_4, primals_5)
buf4 = buf3[0]
buf5 = buf3[1]
del buf3
buf6 = empty_strided_cuda((7, 4), (4, 1), torch.float32)
triton_poi_fused_stack_1[grid(28)](primals_8, primals_6, buf6, 28,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_6
del primals_8
buf7 = empty_strided_cuda((7,), (1,), torch.float32)
triton_poi_fused_stack_2[grid(7)](primals_9, primals_7, buf7, 7,
XBLOCK=8, num_warps=1, num_stages=1)
del primals_7
del primals_9
buf8 = buf2
del buf2
extern_kernels.mm(buf4, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf8)
buf9 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf8, buf4,
primals_4, primals_5)
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = buf8
del buf8
extern_kernels.mm(buf10, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf12)
buf13 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf12,
buf10, primals_4, primals_5)
buf14 = buf13[0]
buf15 = buf13[1]
del buf13
buf16 = buf12
del buf12
extern_kernels.mm(buf14, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf16)
buf17 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf16,
buf14, primals_4, primals_5)
del buf1
del buf16
del primals_4
del primals_5
buf18 = buf17[0]
buf19 = buf17[1]
del buf17
buf20 = empty_strided_cuda((28, 4), (4, 1), torch.float32)
triton_poi_fused_stack_3[grid(112)](buf4, buf10, buf14, buf18,
buf20, 112, XBLOCK=128, num_warps=4, num_stages=1)
buf21 = empty_strided_cuda((7, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf20, (7, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (7, 4, 1), (4, 1, 4), 0), out=buf21)
buf23 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf24 = buf23
del buf23
buf27 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf28 = buf27
del buf27
buf31 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf32 = buf31
del buf31
buf36 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf37 = buf36
del buf36
buf38 = empty_strided_cuda((4,), (1,), torch.bool)
buf30 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf22 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf25 = empty_strided_cuda((4,), (1,), torch.bool)
buf29 = empty_strided_cuda((4,), (1,), torch.bool)
buf33 = empty_strided_cuda((4,), (1,), torch.bool)
buf34 = empty_strided_cuda((4,), (1,), torch.float32)
buf39 = buf34
del buf34
buf35 = empty_strided_cuda((4,), (1,), torch.float32)
buf40 = buf35
del buf35
triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4[
grid(4)](buf39, buf40, buf37, buf7, buf21, buf24, buf28, buf32,
buf38, buf30, buf26, buf22, buf25, buf29, buf33, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del buf24
del buf28
del buf32
del buf37
buf41 = reinterpret_tensor(buf18, (16,), (1,), 0)
del buf18
triton_poi_fused_stack_5[grid(16)](buf30, buf26, buf22, buf41, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_stack_6[grid(16)](buf7, buf21, buf42, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf21
del buf7
return (reinterpret_tensor(buf41, (4, 4), (4, 1), 0),
reinterpret_tensor(buf42, (4, 4), (4, 1), 0), buf39, buf40,
primals_1, buf0, buf4, buf5, buf10, buf11, buf14, buf15, buf19,
        buf22, buf25, buf26, buf29, buf30, buf33, buf38,
        reinterpret_tensor(buf6, (7, 1, 4), (4, 4, 1), 0),
        reinterpret_tensor(buf20, (7, 4, 4), (16, 1, 4), 0), primals_3)
class ParityPonderGRUNew(Module):
"""
## PonderNet with GRU for Parity Task
This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html)
as the step function.
This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`.
Each element of the vector is either `0`, `1` or `-1` and the output is the parity
- a binary value that is true if the number of `1`s is odd and false otherwise.
The prediction of the model is the log probability of the parity being $1$.
"""
def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'):
"""
* `n_elems` is the number of elements in the input vector
* `n_hidden` is the state vector size of the GRU
* `max_steps` is the maximum number of steps $N$
"""
super().__init__()
self.max_steps = max_steps
self.n_hidden = n_hidden
self.gru = nn.GRUCell(n_elems, n_hidden)
self.output_layer = nn.Linear(n_hidden, 1)
self.lambda_layer = nn.Linear(n_hidden, 1)
self.lambda_prob = nn.Sigmoid()
self.is_halt = False
def forward(self, input_0):
primals_2 = self.gru.weight_ih
primals_3 = self.gru.weight_hh
primals_4 = self.gru.bias_ih
primals_5 = self.gru.bias_hh
primals_6 = self.output_layer.weight
primals_7 = self.output_layer.bias
primals_8 = self.lambda_layer.weight
primals_9 = self.lambda_layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1], output[2], output[3]
|
ppvalluri09/annotated_deep_learning_paper_implementations
|
ParityPonderGRU
| false | 11,096 |
[
"MIT"
] | 0 |
387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
https://github.com/ppvalluri09/annotated_deep_learning_paper_implementations/tree/387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
|
GaussianFilter
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/qq/cqq5lsnh6vt6557bounpfgnl6t7pzf7kp656kgxbydvrgd35lsd3.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [1, 1], [6, 6], [1, 1], False, [0, 0], 3), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/t2/ct25onnj2rrutfldwxrd2wns25gwitfsqxfoxgr5j3xsf2pxdzv5.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [1, 1], [6, 6], [1, 1], False, [0, 0], 3), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 3
y1 = (yindex // 3)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (3*x2) + (12288*y1)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp0, ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (3, 1, 13, 13), (169, 169, 13, 1))
assert_size_stride(arg1_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(arg1_1, buf0, 12, 4096, grid=grid(12, 4096), stream=stream0)
del arg1_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, arg0_1, stride=(1, 1), padding=(6, 6), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf1, (4, 3, 64, 64), (12288, 1, 192, 3))
del arg0_1
buf2 = reinterpret_tensor(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf1, buf2, 12, 4096, grid=grid(12, 4096), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((3, 1, 13, 13), (169, 169, 13, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
import torch
from torch import nn
class GaussianFilter(nn.Module):
def __init__(self, kernel_size=13, stride=1, padding=6):
super(GaussianFilter, self).__init__()
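        # Build a 2-D Gaussian kernel on a kernel_size x kernel_size grid:
        # sigma = (kernel_size - 1) / 6, so the window spans roughly +/- 3
        # standard deviations around the centre `mean`.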
mean = (kernel_size - 1) / 2.0
variance = ((kernel_size - 1) / 6.0) ** 2.0
x_coord = torch.arange(kernel_size)
x_grid = x_coord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = x_grid.t()
xy_grid = torch.stack([x_grid, y_grid], dim=-1).float()
        gaussian_kernel = torch.exp(-torch.sum((xy_grid - mean) ** 2.0,
            dim=-1) / (2 * variance))
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
gaussian_kernel = gaussian_kernel.repeat(3, 1, 1, 1)
self.gaussian_filter = nn.Conv2d(3, 3, kernel_size, stride=stride,
padding=padding, groups=3, bias=False)
self.gaussian_filter.weight.data = gaussian_kernel
self.gaussian_filter.weight.requires_grad = False
def forward(self, x):
return self.gaussian_filter(x)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
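# Sanity-check sketch (assumed code, not in the original source): each
# depthwise kernel should be normalised, and blurring a constant image
# should roughly preserve it away from the zero-padded borders.
if __name__ == '__main__':
    f = GaussianFilter()
    w = f.gaussian_filter.weight  # shape (3, 1, 13, 13), one kernel per channel
    print(w.sum(dim=(2, 3)))  # each channel sums to ~1.0
    print(f(torch.ones(1, 3, 64, 64)).mean())  # slightly below 1.0 due to padding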
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12288 * y1), ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp0, ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (3, 1, 13, 13), (169, 169, 13, 1))
assert_size_stride(arg1_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(12, 4096)](arg1_1, buf0, 12,
4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del arg1_1
buf1 = extern_kernels.convolution(buf0, arg0_1, stride=(1, 1),
padding=(6, 6), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=3, bias=None)
assert_size_stride(buf1, (4, 3, 64, 64), (12288, 1, 192, 3))
del arg0_1
buf2 = reinterpret_tensor(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1), 0
)
del buf0
triton_poi_fused_convolution_1[grid(12, 4096)](buf1, buf2, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del buf1
return buf2,
class GaussianFilterNew(nn.Module):
def __init__(self, kernel_size=13, stride=1, padding=6):
super(GaussianFilterNew, self).__init__()
mean = (kernel_size - 1) / 2.0
variance = ((kernel_size - 1) / 6.0) ** 2.0
x_coord = torch.arange(kernel_size)
x_grid = x_coord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = x_grid.t()
xy_grid = torch.stack([x_grid, y_grid], dim=-1).float()
        gaussian_kernel = torch.exp(-torch.sum((xy_grid - mean) ** 2.0,
            dim=-1) / (2 * variance))
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
gaussian_kernel = gaussian_kernel.repeat(3, 1, 1, 1)
self.gaussian_filter = nn.Conv2d(3, 3, kernel_size, stride=stride,
padding=padding, groups=3, bias=False)
self.gaussian_filter.weight.data = gaussian_kernel
self.gaussian_filter.weight.requires_grad = False
def forward(self, input_0):
arg0_1 = self.gaussian_filter.weight
arg1_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
|
zsameem/real-world-sr
|
GaussianFilter
| false | 11,097 |
[
"MIT"
] | 0 |
ed108f3fd2fe4090c18c871c143f30f480de8fb6
|
https://github.com/zsameem/real-world-sr/tree/ed108f3fd2fe4090c18c871c143f30f480de8fb6
|
AdaptiveInstanceNorm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/3g/c3gbtbflqsjqgsddw3vxujyeiz7n7yqa2yck5o4n6wm34w67wrid.py
# Topologically Sorted Source Nodes: [out, mul, out_1], Original ATen: [aten._native_batch_norm_legit, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul_1
# out => add, rsqrt, var_mean
# out_1 => add_1
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %view_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %getitem_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_mul_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (16*x0)), tmp31, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out, mul, out_1], Original ATen: [aten._native_batch_norm_legit, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0.run(buf4, primals_4, buf0, primals_2, buf1, buf5, 16, 16, grid=grid(16), stream=stream0)
del buf0
del primals_2
return (buf5, primals_3, primals_4, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class AdaptiveInstanceNorm(nn.Module):
def __init__(self, style_dim, in_channel):
super(AdaptiveInstanceNorm, self).__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.style = nn.Linear(style_dim, in_channel * 2)
self.style.bias.data[:in_channel] = 1
self.style.bias.data[in_channel:] = 0
def forward(self, input, style):
style = self.style(style).unsqueeze(2).unsqueeze(3)
gamma, beta = style.chunk(2, 1)
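        # gamma scales and beta shifts the instance-normalised features;
        # the bias init in __init__ (gamma bias 1, beta bias 0) starts the
        # mapping near the identity.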
out = self.norm(input)
out = gamma * out + beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'style_dim': 4, 'in_channel': 4}]
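# Illustrative usage sketch (assumed code, not in the original source):
if __name__ == '__main__':
    adain = AdaptiveInstanceNorm(style_dim=4, in_channel=4)
    content = torch.rand(4, 4, 4, 4)  # (N, C, H, W) feature map
    style = torch.rand(4, 4)  # one style vector per sample
    print(adain(content, style).shape)  # torch.Size([4, 4, 4, 4])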
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4,
primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_4, buf1, buf4
class AdaptiveInstanceNormNew(nn.Module):
def __init__(self, style_dim, in_channel):
super(AdaptiveInstanceNormNew, self).__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.style = nn.Linear(style_dim, in_channel * 2)
self.style.bias.data[:in_channel] = 1
self.style.bias.data[in_channel:] = 0
def forward(self, input_0, input_1):
primals_1 = self.style.weight
primals_2 = self.style.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
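# Added commentary (not generated by Inductor): an eager-mode sketch of what
# triton_per_fused__native_batch_norm_legit_add_mul_0 computes per (n, c)
# plane, assuming eps=1e-05 as hard-coded in the kernel. style_out stands for
# the (N, 2C) output of the style Linear layer (the mm result plus bias); its
# first C columns act as gamma and its last C columns as beta.
def adain_reference(x, style_out, eps=1e-05):
    gamma, beta = style_out.unsqueeze(2).unsqueeze(3).chunk(2, 1)
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    return gamma * (x - mean) * torch.rsqrt(var + eps) + beta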
|
repo_name: williamyang1991/DeepPS
module_name: AdaptiveInstanceNorm
synthetic: false
uuid: 11098
licenses: ["MIT"]
stars: 0
sha: f3eb6ba4b0f2ef068361a4bbbd3d6c2c2f6726b4
repo_link: https://github.com/williamyang1991/DeepPS/tree/f3eb6ba4b0f2ef068361a4bbbd3d6c2c2f6726b4
|
ResidualBlock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/la/clalnn5iz2syotwgvds5fjb6mtcklh5yizks6zdxu552jin7zbwe.py
# Topologically Sorted Source Nodes: [residual, residual_1], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# residual => convolution
# residual_1 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_0 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/gz/cgz3i5sfiy2r2zcw7on5c3vgziypvqy3tqtrdk4orqs3phgqxlob.py
# Topologically Sorted Source Nodes: [residual_2, add], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# add => add
# residual_2 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_5, %primals_6, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %convolution_1), kwargs = {})
triton_poi_fused_add_convolution_1 = async_compile.triton('triton_poi_fused_add_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_out_ptr0 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [residual, residual_1], Original ATen: [aten.convolution, aten._prelu_kernel]
stream0 = get_raw_stream(0)
triton_poi_fused__prelu_kernel_convolution_0.run(buf1, primals_2, primals_4, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [residual_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [residual_2, add], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_1.run(buf4, primals_3, primals_6, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf4, primals_1, primals_3, primals_4, primals_5, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
import torch
from torch import nn
class ResidualBlock(nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.prelu = nn.PReLU()
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
def forward(self, x):
residual = self.conv1(x)
residual = self.prelu(residual)
residual = self.conv2(residual)
return x + residual
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
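# Illustrative smoke test (added commentary, not part of the original repo):
# the skip connection preserves shape, and zeroing conv2's parameters turns
# the whole block into an exact identity.
def _residual_smoke_test():
    block = ResidualBlock(channels=4)
    x, = get_inputs()
    assert block(x).shape == x.shape
    nn.init.zeros_(block.conv2.weight)
    nn.init.zeros_(block.conv2.bias)
    assert torch.allclose(block(x), x)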
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1,
primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_add_convolution_1[grid(256)](buf4, primals_3,
primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
return buf4, primals_1, primals_3, primals_4, primals_5, buf1, buf2
class ResidualBlockNew(nn.Module):
def __init__(self, channels):
super(ResidualBlockNew, self).__init__()
self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
self.prelu = nn.PReLU()
self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.prelu.weight
primals_5 = self.conv2.weight
primals_6 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
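# Added commentary (not generated by Inductor): elementwise, the two fused
# kernels above implement
#   kernel 0: y = conv1(x) + bias1; h = where(y > 0, y, prelu_weight * y)
#   kernel 1: out = x + (conv2(h) + bias2)
# so the bias adds, the PReLU, and the residual add ride along as epilogues
# while both convolutions stay on extern_kernels.convolution.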
|
repo_name: zsameem/real-world-sr
module_name: ResidualBlock
synthetic: false
uuid: 11099
licenses: ["MIT"]
stars: 0
sha: ed108f3fd2fe4090c18c871c143f30f480de8fb6
repo_link: https://github.com/zsameem/real-world-sr/tree/ed108f3fd2fe4090c18c871c143f30f480de8fb6
|
SAModule_Head
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/xj/cxjxsxuqu7utclzyuepnlpraugwu73em67oyrv7oapys5jwyvfsb.py
# Topologically Sorted Source Nodes: [x_1, out], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.cat]
# Source node to ATen node mapping:
# out => cat
# x_1 => add, repeat, repeat_1, rsqrt, var_mean
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_3, [4]), kwargs = {})
# %repeat_1 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_3, %view_8, %view_13, %view_18], 1), kwargs = {})
triton_per_fused__native_batch_norm_legit_cat_repeat_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_cat_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_cat_repeat_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_repeat_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, 1])
tmp2 = tl.load(in_ptr1 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, 1])
tmp4 = tl.load(in_ptr2 + (r1 + (16*x0)), xmask, other=0.0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 16.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp4 - tmp14
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp1
tmp29 = tmp28 + tmp3
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tl.store(out_ptr0 + (x0), tmp1, xmask)
tl.store(out_ptr1 + (x0), tmp3, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp25, xmask)
tl.store(out_ptr3 + ((4*r1) + (64*x0)), tmp31, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/tm/ctmmtqrsskonejfnwca4me77icl4owmomtvtjun27tsomvqer26c.py
# Topologically Sorted Source Nodes: [x_3, out], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.cat]
# Source node to ATen node mapping:
# out => cat
# x_3 => add_2, repeat_2, repeat_3, rsqrt_1, var_mean_1
# Graph fragment:
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_6, [4]), kwargs = {})
# %repeat_3 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_7, [4]), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_3, %view_8, %view_13, %view_18], 1), kwargs = {})
triton_per_fused__native_batch_norm_legit_cat_repeat_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_cat_repeat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_cat_repeat_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_repeat_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, 1])
tmp2 = tl.load(in_ptr1 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, 1])
tmp4 = tl.load(in_ptr2 + (r1 + (16*x0)), xmask, other=0.0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 16.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp4 - tmp14
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp1
tmp29 = tmp28 + tmp3
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tl.store(out_ptr0 + (x0), tmp1, xmask)
tl.store(out_ptr1 + (x0), tmp3, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp25, xmask)
tl.store(out_ptr3 + ((4*r1) + (64*x0)), tmp31, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/yh/cyhjdw2dcue5if5ifsjdpd4bkhwx2l6kbamh4nhxjksmiwxcrllp.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_3, %view_8, %view_13, %view_18], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tl.store(out_ptr0 + (x2 + (16*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (1, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (1, ), (1, ))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (1, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_9, (1, ), (1, ))
assert_size_stride(primals_10, (1, ), (1, ))
assert_size_stride(primals_11, (1, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_12, (1, ), (1, ))
assert_size_stride(primals_13, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf3 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf4 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf6 = reinterpret_tensor(buf4, (1, 4, 1, 1), (4, 1, 1, 1), 0); del buf4 # reuse
buf32 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
buf28 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 0) # alias
# Topologically Sorted Source Nodes: [x_1, out], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.cat]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_cat_repeat_0.run(buf6, primals_3, primals_4, buf0, buf1, buf2, buf3, buf28, 4, 16, grid=grid(4), stream=stream0)
del primals_3
del primals_4
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(primals_2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 4, 4), (16, 16, 4, 1))
buf8 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf9 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf10 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf11 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf13 = reinterpret_tensor(buf11, (1, 4, 1, 1), (4, 1, 1, 1), 0); del buf11 # reuse
buf29 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 1) # alias
# Topologically Sorted Source Nodes: [x_3, out], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.cat]
triton_per_fused__native_batch_norm_legit_cat_repeat_1.run(buf13, primals_6, primals_7, buf7, buf8, buf9, buf10, buf29, 4, 16, grid=grid(4), stream=stream0)
del primals_6
del primals_7
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(primals_2, primals_8, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 1, 4, 4), (16, 16, 4, 1))
buf15 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf16 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf17 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf18 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf20 = reinterpret_tensor(buf18, (1, 4, 1, 1), (4, 1, 1, 1), 0); del buf18 # reuse
buf30 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 2) # alias
# Topologically Sorted Source Nodes: [x_5, out], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.cat]
triton_per_fused__native_batch_norm_legit_cat_repeat_1.run(buf20, primals_9, primals_10, buf14, buf15, buf16, buf17, buf30, 4, 16, grid=grid(4), stream=stream0)
del primals_10
del primals_9
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(primals_2, primals_11, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 1, 4, 4), (16, 16, 4, 1))
buf22 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf23 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf24 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf25 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf27 = reinterpret_tensor(buf25, (1, 4, 1, 1), (4, 1, 1, 1), 0); del buf25 # reuse
buf31 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 3) # alias
# Topologically Sorted Source Nodes: [x_7, out], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.cat]
triton_per_fused__native_batch_norm_legit_cat_repeat_1.run(buf27, primals_12, primals_13, buf21, buf22, buf23, buf24, buf31, 4, 16, grid=grid(4), stream=stream0)
del primals_12
del primals_13
buf33 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf32, buf33, 16, 16, grid=grid(16, 16), stream=stream0)
del buf28
del buf29
del buf30
del buf31
del buf32
return (buf33, primals_1, primals_2, primals_5, primals_8, primals_11, buf0, buf1, buf2, buf3, buf6, buf7, buf8, buf9, buf10, buf13, buf14, buf15, buf16, buf17, buf20, buf21, buf22, buf23, buf24, buf27, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4, 5, 5), (100, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 4, 7, 7), (196, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicConv(nn.Module):
def __init__(self, in_channels, out_channels, use_bn=False, **kwargs):
super(BasicConv, self).__init__()
self.use_bn = use_bn
self.conv = nn.Conv2d(in_channels, out_channels, bias=not self.
use_bn, **kwargs)
self.bn = nn.InstanceNorm2d(out_channels, affine=True
) if self.use_bn else None
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
return F.relu(x, inplace=True)
class SAModule_Head(nn.Module):
def __init__(self, in_channels, out_channels, use_bn):
super(SAModule_Head, self).__init__()
branch_out = out_channels // 4
self.branch1x1 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=1)
self.branch3x3 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=3, padding=1)
self.branch5x5 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=5, padding=2)
self.branch7x7 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=7, padding=3)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3(x)
branch5x5 = self.branch5x5(x)
branch7x7 = self.branch7x7(x)
out = torch.cat([branch1x1, branch3x3, branch5x5, branch7x7], 1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'use_bn': 4}]
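# Illustrative shape check (added commentary, not part of the original repo):
# each branch emits out_channels // 4 channels at the input resolution, so
# the channel-wise cat restores out_channels. Note that get_init_inputs
# passes use_bn=4; any truthy value enables the InstanceNorm2d path that the
# compiled code below exercises.
def _head_smoke_test():
    head = SAModule_Head(in_channels=4, out_channels=4, use_bn=True)
    x, = get_inputs()
    assert head(x).shape == x.shape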
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_repeat_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, 1])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, 1])
tmp4 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 16.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp4 - tmp14
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp1
tmp29 = tmp28 + tmp3
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp3, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp25, xmask)
tl.store(out_ptr3 + (4 * r1 + 64 * x0), tmp31, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_repeat_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, 1])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, 1])
tmp4 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 16.0
tmp22 = tmp20 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp4 - tmp14
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp1
tmp29 = tmp28 + tmp3
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp3, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp25, xmask)
tl.store(out_ptr3 + (4 * r1 + 64 * x0), tmp31, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x2 + 16 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (1, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (1,), (1,))
assert_size_stride(primals_11, (1, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_12, (1,), (1,))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf4 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf6 = reinterpret_tensor(buf4, (1, 4, 1, 1), (4, 1, 1, 1), 0)
del buf4
buf32 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
buf28 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 0)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_cat_repeat_0[grid(4)](buf6,
primals_3, primals_4, buf0, buf1, buf2, buf3, buf28, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
del primals_4
buf7 = extern_kernels.convolution(primals_2, primals_5, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 4, 4), (16, 16, 4, 1))
buf8 = empty_strided_cuda((4,), (1,), torch.float32)
buf9 = empty_strided_cuda((4,), (1,), torch.float32)
buf10 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf11 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf13 = reinterpret_tensor(buf11, (1, 4, 1, 1), (4, 1, 1, 1), 0)
del buf11
buf29 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 1)
triton_per_fused__native_batch_norm_legit_cat_repeat_1[grid(4)](buf13,
primals_6, primals_7, buf7, buf8, buf9, buf10, buf29, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_6
del primals_7
buf14 = extern_kernels.convolution(primals_2, primals_8, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 1, 4, 4), (16, 16, 4, 1))
buf15 = empty_strided_cuda((4,), (1,), torch.float32)
buf16 = empty_strided_cuda((4,), (1,), torch.float32)
buf17 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf18 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf20 = reinterpret_tensor(buf18, (1, 4, 1, 1), (4, 1, 1, 1), 0)
del buf18
buf30 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 2)
triton_per_fused__native_batch_norm_legit_cat_repeat_1[grid(4)](buf20,
primals_9, primals_10, buf14, buf15, buf16, buf17, buf30, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_10
del primals_9
buf21 = extern_kernels.convolution(primals_2, primals_11, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 1, 4, 4), (16, 16, 4, 1))
buf22 = empty_strided_cuda((4,), (1,), torch.float32)
buf23 = empty_strided_cuda((4,), (1,), torch.float32)
buf24 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf25 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf27 = reinterpret_tensor(buf25, (1, 4, 1, 1), (4, 1, 1, 1), 0)
del buf25
buf31 = reinterpret_tensor(buf32, (4, 1, 4, 4), (64, 1, 16, 4), 3)
triton_per_fused__native_batch_norm_legit_cat_repeat_1[grid(4)](buf27,
primals_12, primals_13, buf21, buf22, buf23, buf24, buf31, 4,
16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_12
del primals_13
buf33 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_cat_2[grid(16, 16)](buf32, buf33, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
del buf28
del buf29
del buf30
del buf31
del buf32
return (buf33, primals_1, primals_2, primals_5, primals_8, primals_11,
buf0, buf1, buf2, buf3, buf6, buf7, buf8, buf9, buf10, buf13, buf14,
buf15, buf16, buf17, buf20, buf21, buf22, buf23, buf24, buf27)
class BasicConv(nn.Module):
def __init__(self, in_channels, out_channels, use_bn=False, **kwargs):
super(BasicConv, self).__init__()
self.use_bn = use_bn
self.conv = nn.Conv2d(in_channels, out_channels, bias=not self.
use_bn, **kwargs)
self.bn = nn.InstanceNorm2d(out_channels, affine=True
) if self.use_bn else None
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
return F.relu(x, inplace=True)
class SAModule_HeadNew(nn.Module):
def __init__(self, in_channels, out_channels, use_bn):
super(SAModule_HeadNew, self).__init__()
branch_out = out_channels // 4
self.branch1x1 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=1)
self.branch3x3 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=3, padding=1)
self.branch5x5 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=5, padding=2)
self.branch7x7 = BasicConv(in_channels, branch_out, use_bn=use_bn,
kernel_size=7, padding=3)
def forward(self, input_0):
primals_1 = self.branch1x1.conv.weight
primals_3 = self.branch1x1.bn.weight
primals_4 = self.branch1x1.bn.bias
primals_5 = self.branch3x3.conv.weight
primals_6 = self.branch3x3.bn.weight
primals_7 = self.branch3x3.bn.bias
primals_8 = self.branch5x5.conv.weight
primals_9 = self.branch5x5.bn.weight
primals_10 = self.branch5x5.bn.bias
primals_11 = self.branch7x7.conv.weight
primals_12 = self.branch7x7.bn.weight
primals_13 = self.branch7x7.bn.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
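# Added commentary (not generated by Inductor): each per-branch kernel fuses
# the instance-norm reduction over the 4x4 plane (eps=1e-05), the repeated
# single-channel affine weight/bias, and the ReLU, then stores its result
# straight into buf32, whose (64, 1, 16, 4) strides interleave the four
# branches along the channel axis. triton_poi_fused_cat_2 only has to copy
# that interleaved buffer into the contiguous (64, 16, 4, 1) output, so
# torch.cat never materialises separate branch tensors.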
|
repo_name: vghost2008/C-3-Framework
module_name: SAModule_Head
synthetic: false
uuid: 11100
licenses: ["MIT"]
stars: 0
sha: dc6f1f67e403aff4dbb60f8ed06461c843407501
repo_link: https://github.com/vghost2008/C-3-Framework/tree/dc6f1f67e403aff4dbb60f8ed06461c843407501
|